A clone of btpd with my configuration changes.
Vous ne pouvez pas sélectionner plus de 25 sujets Les noms de sujets doivent commencer par une lettre ou un nombre, peuvent contenir des tirets ('-') et peuvent comporter jusqu'à 35 caractères.

198 lignes
4.8 KiB

  1. #include <math.h>
  2. #include "btpd.h"
  3. #include "tracker_req.h"
/*
 * Called when a peer announces it's got a new piece.
 *
 * If the piece is missing or unfull we increase the peer's
 * wanted level and if possible call dl_on_download.
 */
void
dl_on_piece_ann(struct peer *p, uint32_t index)
{
    struct net *n = p->n;
    /* Track swarm-wide availability of this piece. */
    n->piece_count[index]++;
    if (cm_has_piece(n->tp, index))
        return; /* Already have it; nothing to want from this peer. */
    struct piece *pc = dl_find_piece(n, index);
    if (n->endgame) {
        /* In end game every missing piece is active, so it must exist. */
        assert(pc != NULL);
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p))
            dl_assign_requests_eg(p);
    } else if (pc == NULL) {
        /* Piece isn't active yet: want it, and if the peer can serve us
         * right now, activate the piece and request blocks from it. */
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p)) {
            pc = dl_new_piece(n, index);
            if (pc != NULL)
                dl_piece_assign_requests(pc, p);
        }
    } else if (!piece_full(pc)) {
        /* Piece is active and still has unrequested blocks. */
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p))
            dl_piece_assign_requests(pc, p);
    }
}
  36. void
  37. dl_on_download(struct peer *p)
  38. {
  39. assert(peer_wanted(p));
  40. struct net *n = p->n;
  41. if (n->endgame) {
  42. dl_assign_requests_eg(p);
  43. } else {
  44. unsigned count = dl_assign_requests(p);
  45. if (count == 0 && !p->n->endgame) // We may have entered end game.
  46. assert(!peer_wanted(p) || peer_laden(p));
  47. }
  48. }
  49. void
  50. dl_on_unchoke(struct peer *p)
  51. {
  52. if (peer_wanted(p))
  53. dl_on_download(p);
  54. }
  55. void
  56. dl_on_undownload(struct peer *p)
  57. {
  58. if (!p->n->endgame)
  59. dl_unassign_requests(p);
  60. else
  61. dl_unassign_requests_eg(p);
  62. }
  63. void
  64. dl_on_choke(struct peer *p)
  65. {
  66. if (p->nreqs_out > 0)
  67. dl_on_undownload(p);
  68. }
/**
 * Called when a piece has been tested positively.
 */
void
dl_on_ok_piece(struct net *n, uint32_t piece)
{
    struct peer *p;
    struct piece *pc = dl_find_piece(n, piece);
    btpd_log(BTPD_L_POL, "Got piece: %u.\n", pc->index);
    /* Advertise the new piece to every connected peer. */
    struct net_buf *have = nb_create_have(pc->index);
    BTPDQ_FOREACH(p, &n->peers, p_entry)
        peer_send(p, have);
    /* In end game each holder of the piece was "wanted"; retract that
     * now that the piece is verified. */
    if (n->endgame)
        BTPDQ_FOREACH(p, &n->peers, p_entry)
            if (peer_has(p, pc->index))
                peer_unwant(p, pc->index);
    assert(pc->nreqs == 0);
    piece_free(pc);
    if (cm_full(n->tp)) {
        /* That was the last piece — the torrent is complete. */
        btpd_log(BTPD_L_BTPD, "Finished: %s.\n", n->tp->relpath);
        tr_complete(n->tp);
        BTPDQ_FOREACH(p, &n->peers, p_entry)
            assert(p->nwant == 0);
    }
}
/*
 * Called when a piece has been tested negatively.
 */
void
dl_on_bad_piece(struct net *n, uint32_t piece)
{
    struct piece *pc = dl_find_piece(n, piece);
    btpd_log(BTPD_L_ERROR, "Bad hash for piece %u of %s.\n",
        pc->index, n->tp->relpath);
    /* Reset the piece's bookkeeping so every block is downloadable
     * again from scratch. */
    for (uint32_t i = 0; i < pc->nblocks; i++)
        clear_bit(pc->down_field, i);
    pc->ngot = 0;
    pc->nbusy = 0;
    if (n->endgame) {
        /* Re-issue requests to every idle peer that has this piece. */
        struct peer *p;
        BTPDQ_FOREACH(p, &n->peers, p_entry) {
            if (peer_has(p, pc->index) && peer_leech_ok(p) && !peer_laden(p))
                dl_assign_requests_eg(p);
        }
    } else
        dl_on_piece_unfull(pc); // XXX: May get bad data again.
}
/*
 * Called when a new peer connects. Intentionally empty — there is no
 * download state to set up for a fresh peer.
 */
void
dl_on_new_peer(struct peer *p)
{
}
  120. void
  121. dl_on_lost_peer(struct peer *p)
  122. {
  123. struct net *n = p->n;
  124. for (uint32_t i = 0; i < n->tp->meta.npieces; i++)
  125. if (peer_has(p, i))
  126. n->piece_count[i]--;
  127. if (p->nreqs_out > 0)
  128. dl_on_undownload(p);
  129. }
/*
 * Called when a block of data arrives from a peer. Stores the data
 * and, in end game, cancels the duplicate requests that were sent to
 * other peers for the same block.
 */
void
dl_on_block(struct peer *p, struct block_request *req,
    uint32_t index, uint32_t begin, uint32_t length, const uint8_t *data)
{
    struct net *n = p->n;
    struct block *blk = req->blk;
    struct piece *pc = blk->pc;
    cm_put_bytes(p->n->tp, index, begin, data, length);
    pc->ngot++;
    if (n->endgame) {
        /* NOTE: this declaration shadows the 'req' parameter for the
         * rest of the end game branch. */
        struct block_request *req;
        struct net_buf *cancel = nb_create_cancel(index, begin, length);
        nb_hold(cancel);
        /* Cancel this block on every other peer we requested it from. */
        BTPDQ_FOREACH(req, &blk->reqs, blk_entry) {
            if (req->p != p)
                peer_cancel(req->p, req, cancel);
            pc->nreqs--;
        }
        nb_drop(cancel);
        dl_piece_reorder_eg(pc);
        /* Free all requests for this block; give each now-idle peer
         * something else to do. Grab the next pointer before free. */
        req = BTPDQ_FIRST(&blk->reqs);
        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, blk_entry);
            if (peer_leech_ok(req->p) && !peer_laden(req->p))
                dl_assign_requests_eg(req->p);
            free(req);
            req = next;
        }
        BTPDQ_INIT(&blk->reqs);
        if (pc->ngot == pc->nblocks)
            cm_test_piece(pc->n->tp, pc->index);
    } else {
        BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
        free(req);
        pc->nreqs--;
        // XXX: Needs to be looked at if we introduce snubbing.
        clear_bit(pc->down_field, begin / PIECE_BLOCKLEN);
        pc->nbusy--;
        /* Piece complete — hand it over for hash verification. */
        if (pc->ngot == pc->nblocks)
            cm_test_piece(pc->n->tp, pc->index);
        if (peer_leech_ok(p) && !peer_laden(p))
            dl_assign_requests(p);
    }
}