A clone of btpd with my configuration changes.

#include <sys/types.h>
#include <sys/mman.h>

#include "btpd.h"
#include "tracker_req.h"
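/*
 * Called once a second for each torrent. Fires the tracker request,
 * optimistic unchoke rotation and choke algorithm when their timers
 * are due, and clears the current slot in each peer's rate history.
 */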
void
cm_by_second(struct torrent *tp)
{
    if (btpd.seconds == tp->tracker_time)
        tracker_req(tp, TR_EMPTY);

    if (btpd.seconds == tp->opt_time)
        next_optimistic(tp, NULL);

    if (btpd.seconds == tp->choke_time)
        choke_alg(tp);

    struct peer *p;
    int ri = btpd.seconds % RATEHISTORY;

    BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
        p->rate_to_me[ri] = 0;
        p->rate_from_me[ri] = 0;
    }
}

/*
 * Called when a peer announces it's got a new piece.
 *
 * If the piece is missing or unfull, we increase the peer's
 * wanted level and, if possible, call cm_on_download.
 */
void
cm_on_piece_ann(struct peer *p, uint32_t index)
{
    struct torrent *tp = p->tp;

    tp->piece_count[index]++;

    if (has_bit(tp->piece_field, index))
        return;

    struct piece *pc = cm_find_piece(tp, index);

    if (tp->endgame) {
        if (pc != NULL) {
            peer_want(p, index);
            if (!peer_chokes(p))
                cm_piece_assign_requests_eg(pc, p);
        }
    } else if (pc == NULL) {
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p)) {
            pc = cm_new_piece(tp, index);
            if (pc != NULL)
                cm_piece_assign_requests(pc, p);
        }
    } else if (!piece_full(pc)) {
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p))
            cm_piece_assign_requests(pc, p);
    }
}

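/*
 * Called when we want and may download from a peer. Hands out block
 * requests, using the end game assignment if the torrent is in end
 * game mode.
 */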
void
cm_on_download(struct peer *p)
{
    assert(peer_wanted(p));
    struct torrent *tp = p->tp;
    if (tp->endgame) {
        cm_assign_requests_eg(p);
    } else {
        unsigned count = cm_assign_requests(p);
        if (count == 0 && !p->tp->endgame) // We may have entered end game.
            assert(!peer_wanted(p) || peer_laden(p));
    }
}

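/*
 * Called when a peer unchokes us. Start downloading if we want
 * something the peer has.
 */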
void
cm_on_unchoke(struct peer *p)
{
    if (peer_wanted(p))
        cm_on_download(p);
}

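/*
 * Called when we no longer may download from a peer. Takes back the
 * requests assigned to the peer.
 */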
void
cm_on_undownload(struct peer *p)
{
    if (!p->tp->endgame)
        cm_unassign_requests(p);
    else
        cm_unassign_requests_eg(p);
}

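/*
 * Called when a peer chokes us. Withdraw our outstanding requests,
 * if any.
 */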
void
cm_on_choke(struct peer *p)
{
    if (p->nreqs_out > 0)
        cm_on_undownload(p);
}

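/*
 * Called when we may upload to a peer. Rerun the choke algorithm so
 * the peer is considered for unchoking.
 */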
void
cm_on_upload(struct peer *p)
{
    choke_alg(p->tp);
}

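/*
 * Called when a peer becomes interested in us. If we aren't choking
 * the peer it may start downloading, so treat it as a new upload.
 */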
void
cm_on_interest(struct peer *p)
{
    if ((p->flags & PF_I_CHOKE) == 0)
        cm_on_upload(p);
}

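/*
 * Called when we no longer upload to a peer. Rerun the choke
 * algorithm to redistribute the unchoke slots.
 */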
void
cm_on_unupload(struct peer *p)
{
    choke_alg(p->tp);
}

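/*
 * Called when a peer loses interest in us. If we weren't choking
 * the peer it counted as an upload, so note that the upload ended.
 */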
void
cm_on_uninterest(struct peer *p)
{
    if ((p->flags & PF_I_CHOKE) == 0)
        cm_on_unupload(p);
}

/**
 * Called when a piece has been tested positively.
 */
void
cm_on_ok_piece(struct piece *pc)
{
    struct peer *p;
    struct torrent *tp = pc->tp;

    btpd_log(BTPD_L_POL, "Got piece: %u.\n", pc->index);

    set_bit(tp->piece_field, pc->index);
    tp->have_npieces++;
    msync(tp->imem, tp->isiz, MS_ASYNC);

    struct net_buf *have = nb_create_have(pc->index);
    BTPDQ_FOREACH(p, &tp->peers, cm_entry)
        peer_send(p, have);

    if (tp->endgame)
        BTPDQ_FOREACH(p, &tp->peers, cm_entry)
            if (peer_has(p, pc->index))
                peer_unwant(p, pc->index);

    piece_free(pc);

    if (torrent_has_all(tp)) {
        btpd_log(BTPD_L_BTPD, "Finished: %s.\n", tp->relpath);
        tracker_req(tp, TR_COMPLETED);
        BTPDQ_FOREACH(p, &tp->peers, cm_entry)
            assert(p->nwant == 0);
    }
}

/*
 * Called when a piece has been tested negatively.
 */
void
cm_on_bad_piece(struct piece *pc)
{
    struct torrent *tp = pc->tp;

    btpd_log(BTPD_L_ERROR, "Bad hash for piece %u of %s.\n",
        pc->index, tp->relpath);

    for (uint32_t i = 0; i < pc->nblocks; i++) {
        clear_bit(pc->down_field, i);
        clear_bit(pc->have_field, i);
    }
    pc->ngot = 0;
    pc->nbusy = 0;

    msync(tp->imem, tp->isiz, MS_ASYNC);

    if (tp->endgame) {
        struct peer *p;
        BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
            if (peer_has(p, pc->index) && peer_leech_ok(p))
                cm_piece_assign_requests_eg(pc, p);
        }
    } else
        cm_on_piece_unfull(pc); // XXX: May get bad data again.

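/*
 * Called when a peer attaches to a torrent. The first peer becomes
 * the optimistic unchoke; later peers are inserted either after the
 * current optimistic peer or at the tail, chosen at random.
 */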
void
cm_on_new_peer(struct peer *p)
{
    struct torrent *tp = p->tp;

    tp->npeers++;
    p->flags |= PF_ATTACHED;
    BTPDQ_REMOVE(&btpd.unattached, p, cm_entry);

    if (tp->npeers == 1) {
        BTPDQ_INSERT_HEAD(&tp->peers, p, cm_entry);
        next_optimistic(tp, p);
    } else {
        if (random() > RAND_MAX / 3)
            BTPDQ_INSERT_AFTER(&tp->peers, tp->optimistic, p, cm_entry);
        else
            BTPDQ_INSERT_TAIL(&tp->peers, p, cm_entry);
    }
}

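/*
 * Called when a peer detaches from a torrent. Unlinks the peer,
 * picks a new optimistic unchoke if needed, updates the piece
 * availability counts and takes back any outstanding requests.
 */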
void
cm_on_lost_peer(struct peer *p)
{
    struct torrent *tp = p->tp;

    tp->npeers--;
    p->flags &= ~PF_ATTACHED;

    if (tp->npeers == 0) {
        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
        tp->optimistic = NULL;
        tp->choke_time = tp->opt_time = 0;
    } else if (tp->optimistic == p) {
        struct peer *next = BTPDQ_NEXT(p, cm_entry);
        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
        next_optimistic(tp, next);
    } else if ((p->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT) {
        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
        cm_on_unupload(p);
    } else {
        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
    }

    for (uint32_t i = 0; i < tp->meta.npieces; i++)
        if (peer_has(p, i))
            tp->piece_count[i]--;

    if (p->nreqs_out > 0)
        cm_on_undownload(p);

#if 0
    struct piece *pc = BTPDQ_FIRST(&tp->getlst);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        if (peer_has(p, pc->index) && tp->piece_count[pc->index] == 0)
            cm_on_peerless_piece(pc);
        pc = next;
    }
#endif
}

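/*
 * Called when a block has been received. Stores the data, cancels
 * outstanding duplicate requests in end game mode, passes completed
 * pieces to cm_on_piece and tries to keep the peer busy.
 */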
void
cm_on_block(struct peer *p, uint32_t index, uint32_t begin, uint32_t length,
    const char *data)
{
    struct torrent *tp = p->tp;

    off_t cbegin = index * p->tp->meta.piece_length + begin;
    torrent_put_bytes(p->tp, data, cbegin, length);

    struct piece *pc = cm_find_piece(tp, index);
    assert(pc != NULL);

    uint32_t block = begin / PIECE_BLOCKLEN;
    set_bit(pc->have_field, block);
    pc->ngot++;

    if (tp->endgame) {
        BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
            if (peer_has(p, index) && p->nreqs_out > 0)
                peer_cancel(p, index, begin, length);
        }
        if (pc->ngot == pc->nblocks)
            cm_on_piece(pc);
    } else {
        // XXX: Needs to be looked at if we introduce snubbing.
        clear_bit(pc->down_field, block);
        pc->nbusy--;
        if (pc->ngot == pc->nblocks)
            cm_on_piece(pc);
        if (peer_leech_ok(p) && !peer_laden(p))
            cm_assign_requests(p);
    }
}