A clone of btpd with my configuration changes.

/*
 * The commandments:
 *
 * A peer is wanted except when it only has pieces we've already
 * downloaded or fully requested. Thus, a peer's wanted count is
 * increased for each missing or unfull piece it announces, or
 * when a piece it has becomes unfull.
 *
 * When a peer we want unchokes us, requests will primarily
 * be put on pieces we're already downloading and then on
 * possible new pieces.
 *
 * When choosing between several different new pieces to start
 * downloading, the rarest piece will be chosen.
 *
 * End game mode sets in when all missing blocks are requested.
 * In end game mode no piece is counted as full unless it's
 * downloaded.
 *
 */

#include <fcntl.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include <openssl/sha.h>
#include "btpd.h"
#include "stream.h"

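/*
 * Allocate and initialise a piece for download. The down field and
 * the block array are carved out of a single allocation; blocks we
 * already have are counted in ngot, and a request message is prepared
 * for every block. The piece is marked busy and put on the net's
 * getlst.
 */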
static struct piece *
piece_alloc(struct net *n, uint32_t index)
{
    assert(!has_bit(n->busy_field, index)
        && n->npcs_busy < n->tp->meta.npieces);
    struct piece *pc;
    size_t mem, field, blocks;
    unsigned nblocks;

    nblocks = torrent_piece_blocks(n->tp, index);
    blocks = sizeof(pc->blocks[0]) * nblocks;
    field = (size_t)ceil(nblocks / 8.0);
    mem = sizeof(*pc) + field + blocks;

    pc = btpd_calloc(1, mem);
    pc->n = n;
    pc->down_field = (uint8_t *)(pc + 1);
    pc->have_field = cm_get_block_field(n->tp, index);
    pc->index = index;
    pc->nblocks = nblocks;
    pc->nreqs = 0;
    pc->next_block = 0;

    for (unsigned i = 0; i < nblocks; i++)
        if (has_bit(pc->have_field, i))
            pc->ngot++;
    assert(pc->ngot < pc->nblocks);

    pc->blocks = (struct block *)(pc->down_field + field);
    for (unsigned i = 0; i < nblocks; i++) {
        uint32_t start = i * PIECE_BLOCKLEN;
        uint32_t len = torrent_block_size(n->tp, index, nblocks, i);
        struct block *blk = &pc->blocks[i];
        blk->pc = pc;
        BTPDQ_INIT(&blk->reqs);
        blk->msg = nb_create_request(index, start, len);
        nb_hold(blk->msg);
    }

    n->npcs_busy++;
    set_bit(n->busy_field, index);
    BTPDQ_INSERT_HEAD(&n->getlst, pc, entry);
    return pc;
}

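/*
 * Release a piece: free any remaining block requests, drop the
 * per-block request messages, clear the busy bit and take the piece
 * off the getlst.
 */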
void
piece_free(struct piece *pc)
{
    struct net *n = pc->n;
    assert(n->npcs_busy > 0);
    n->npcs_busy--;
    clear_bit(n->busy_field, pc->index);
    BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
    for (unsigned i = 0; i < pc->nblocks; i++) {
        struct block_request *req = BTPDQ_FIRST(&pc->blocks[i].reqs);
        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, blk_entry);
            free(req);
            req = next;
        }
        nb_drop(pc->blocks[i].msg);
    }
    free(pc);
}

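/*
 * A piece is full when every block is either downloaded or busy
 * (has outstanding requests).
 */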
int
piece_full(struct piece *pc)
{
    return pc->ngot + pc->nbusy == pc->nblocks;
}

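/*
 * End game should start when every piece is either downloaded or
 * busy, and all busy pieces are full, i.e. all missing blocks have
 * been requested.
 */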
static int
dl_should_enter_endgame(struct net *n)
{
    int should;
    if (cm_get_npieces(n->tp) + n->npcs_busy == n->tp->meta.npieces) {
        should = 1;
        struct piece *pc;
        BTPDQ_FOREACH(pc, &n->getlst, entry) {
            if (!piece_full(pc)) {
                should = 0;
                break;
            }
        }
    } else
        should = 0;
    return should;
}

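/*
 * Insert a piece into the end game list. The list is kept ordered by
 * requests per missing block, so the least requested pieces come
 * first; completely downloaded pieces go to the tail.
 */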
static void
dl_piece_insert_eg(struct piece *pc)
{
    struct piece_tq *getlst = &pc->n->getlst;
    if (pc->nblocks == pc->ngot)
        BTPDQ_INSERT_TAIL(getlst, pc, entry);
    else {
        unsigned r = pc->nreqs / (pc->nblocks - pc->ngot);
        struct piece *it;
        BTPDQ_FOREACH(it, getlst, entry) {
            if ((it->nblocks == it->ngot
                    || r < it->nreqs / (it->nblocks - it->ngot))) {
                BTPDQ_INSERT_BEFORE(it, pc, entry);
                break;
            }
        }
        if (it == NULL)
            BTPDQ_INSERT_TAIL(getlst, pc, entry);
    }
}

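/*
 * Reinsert a piece so the end game list stays ordered after its
 * request or block counts have changed.
 */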
void
dl_piece_reorder_eg(struct piece *pc)
{
    BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
    dl_piece_insert_eg(pc);
}

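/*
 * Switch to end game mode: clear the down fields of all busy pieces,
 * rebuild the piece list in end game order, renew the peers' wanted
 * counts and assign end game requests to every peer that is ready
 * for them.
 */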
static void
dl_enter_endgame(struct net *n)
{
    struct peer *p;
    struct piece *pc;
    struct piece *pcs[n->npcs_busy];
    unsigned pi;

    btpd_log(BTPD_L_POL, "Entering end game\n");
    n->endgame = 1;
    pi = 0;
    BTPDQ_FOREACH(pc, &n->getlst, entry) {
        for (unsigned i = 0; i < pc->nblocks; i++)
            clear_bit(pc->down_field, i);
        pc->nbusy = 0;
        pcs[pi] = pc;
        pi++;
    }
    BTPDQ_INIT(&n->getlst);
    while (pi > 0) {
        pi--;
        dl_piece_insert_eg(pcs[pi]);
    }
    BTPDQ_FOREACH(p, &n->peers, p_entry) {
        assert(p->nwant == 0);
        BTPDQ_FOREACH(pc, &n->getlst, entry) {
            if (peer_has(p, pc->index))
                peer_want(p, pc->index);
        }
        if (p->nwant > 0 && peer_leech_ok(p) && !peer_laden(p))
            dl_assign_requests_eg(p);
    }
}

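/*
 * Find the busy piece with the given index, or NULL if it isn't on
 * the getlst.
 */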
struct piece *
dl_find_piece(struct net *n, uint32_t index)
{
    struct piece *pc;
    BTPDQ_FOREACH(pc, &n->getlst, entry)
        if (pc->index == index)
            break;
    return pc;
}

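/*
 * A piece may be started on this peer if the peer has it and we
 * neither have it nor have allocated it for download yet.
 */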
static int
dl_piece_startable(struct peer *p, uint32_t index)
{
    return peer_has(p, index) && !cm_has_piece(p->n->tp, index)
        && !has_bit(p->n->busy_field, index);
}

/*
 * Find the rarest piece the peer has that isn't already allocated
 * for download or already downloaded. If no such piece can be found,
 * return ENOENT.
 *
 * Return 0 or ENOENT, index in res.
 */
static int
dl_choose_rarest(struct peer *p, uint32_t *res)
{
    uint32_t i;
    struct net *n = p->n;

    assert(n->endgame == 0);

    for (i = 0; i < n->tp->meta.npieces && !dl_piece_startable(p, i); i++)
        ;

    if (i == n->tp->meta.npieces)
        return ENOENT;

    uint32_t min_i = i;
    uint32_t min_c = 1;
    for (i++; i < n->tp->meta.npieces; i++) {
        if (dl_piece_startable(p, i)) {
            if (n->piece_count[i] == n->piece_count[min_i])
                min_c++;
            else if (n->piece_count[i] < n->piece_count[min_i]) {
                min_i = i;
                min_c = 1;
            }
        }
    }
    if (min_c > 1) {
        min_c = rand_between(1, min_c);
        for (i = min_i; min_c > 0; i++) {
            if (dl_piece_startable(p, i)
                && n->piece_count[i] == n->piece_count[min_i]) {
                min_c--;
                min_i = i;
            }
        }
    }
    *res = min_i;
    return 0;
}

/*
 * Called from dl_piece_assign_requests when a piece becomes full.
 * The wanted level of the peers that have this piece will be decreased.
 * This function is the only one that may trigger end game.
 */
static void
dl_on_piece_full(struct piece *pc)
{
    struct peer *p;
    BTPDQ_FOREACH(p, &pc->n->peers, p_entry) {
        if (peer_has(p, pc->index))
            peer_unwant(p, pc->index);
    }
    if (dl_should_enter_endgame(pc->n))
        dl_enter_endgame(pc->n);
}

/*
 * Allocate the piece indicated by the index for download.
 * There's a small possibility that a piece is fully downloaded
 * but hasn't been tested. If that is the case the piece will
 * be tested and NULL will be returned. Also, we might then enter
 * end game.
 *
 * Return the piece or NULL.
 */
struct piece *
dl_new_piece(struct net *n, uint32_t index)
{
    btpd_log(BTPD_L_POL, "Started on piece %u.\n", index);
    cm_prealloc(n->tp, index);
    return piece_alloc(n, index);
}

/*
 * Called when a previously full piece loses a peer.
 * This is needed because we have decreased the wanted
 * level for the peers that have this piece when it got
 * full. Thus we have to increase the wanted level and
 * try to assign requests for this piece.
 */
void
dl_on_piece_unfull(struct piece *pc)
{
    struct net *n = pc->n;
    struct peer *p;
    assert(!piece_full(pc) && n->endgame == 0);
    BTPDQ_FOREACH(p, &n->peers, p_entry)
        if (peer_has(p, pc->index))
            peer_want(p, pc->index);
    p = BTPDQ_FIRST(&n->peers);
    while (p != NULL && !piece_full(pc)) {
        if (peer_leech_ok(p) && !peer_laden(p))
            dl_piece_assign_requests(pc, p); // Cannot provoke end game here.
        p = BTPDQ_NEXT(p, p_entry);
    }
}

#define INCNEXTBLOCK(pc) \
    (pc)->next_block = ((pc)->next_block + 1) % (pc)->nblocks

/*
 * Request as many blocks as possible on this piece from
 * the peer. If the piece becomes full we call dl_on_piece_full.
 *
 * Return the number of requests sent.
 */
unsigned
dl_piece_assign_requests(struct piece *pc, struct peer *p)
{
    assert(!piece_full(pc) && !peer_laden(p));
    unsigned count = 0;
    do {
        while ((has_bit(pc->have_field, pc->next_block)
                || has_bit(pc->down_field, pc->next_block)))
            INCNEXTBLOCK(pc);

        struct block *blk = &pc->blocks[pc->next_block];
        struct block_request *req = btpd_malloc(sizeof(*req));
        req->p = p;
        req->blk = blk;
        BTPDQ_INSERT_TAIL(&blk->reqs, req, blk_entry);

        peer_request(p, req);

        set_bit(pc->down_field, pc->next_block);
        pc->nbusy++;
        pc->nreqs++;
        count++;
        INCNEXTBLOCK(pc);
    } while (!piece_full(pc) && !peer_laden(p));

    if (piece_full(pc))
        dl_on_piece_full(pc);

    return count;
}

/*
 * Request as many blocks as possible from the peer. Puts
 * requests on already active pieces before starting on new
 * ones. Care must be taken since end game mode may be triggered
 * by the calls to dl_piece_assign_requests.
 *
 * Returns number of requests sent.
 *
 * XXX: should do something smart when deciding on which
 * already started piece to put requests on.
 */
unsigned
dl_assign_requests(struct peer *p)
{
    assert(!p->n->endgame && !peer_laden(p));
    struct piece *pc;
    struct net *n = p->n;
    unsigned count = 0;

    BTPDQ_FOREACH(pc, &n->getlst, entry) {
        if (piece_full(pc) || !peer_has(p, pc->index))
            continue;
        count += dl_piece_assign_requests(pc, p);
        if (n->endgame)
            break;
        if (!piece_full(pc))
            assert(peer_laden(p));
        if (peer_laden(p))
            break;
    }

    while (!peer_laden(p) && !n->endgame) {
        uint32_t index;
        if (dl_choose_rarest(p, &index) == 0) {
            pc = dl_new_piece(n, index);
            if (pc != NULL)
                count += dl_piece_assign_requests(pc, p);
        } else
            break;
    }

    return count;
}

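/*
 * Drop all outstanding requests held by this peer (normal mode).
 * The affected blocks are marked not busy again, and pieces that
 * were full only because of this peer become unfull.
 */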
void
dl_unassign_requests(struct peer *p)
{
    while (p->nreqs_out > 0) {
        struct block_request *req = BTPDQ_FIRST(&p->my_reqs);
        struct piece *pc = req->blk->pc;
        int was_full = piece_full(pc);

        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, p_entry);
            uint32_t blki = nb_get_begin(req->blk->msg) / PIECE_BLOCKLEN;
            struct block *blk = req->blk;
            // XXX: Needs to be looked at if we introduce snubbing.
            assert(has_bit(pc->down_field, blki));
            clear_bit(pc->down_field, blki);
            pc->nbusy--;
            BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
            p->nreqs_out--;
            BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
            free(req);
            pc->nreqs--;
            while (next != NULL && next->blk->pc != pc)
                next = BTPDQ_NEXT(next, p_entry);
            req = next;
        }

        if (p->nreqs_out == 0)
            peer_on_no_reqs(p);

        if (was_full && !piece_full(pc))
            dl_on_piece_unfull(pc);
    }
    assert(BTPDQ_EMPTY(&p->my_reqs));
}

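/*
 * End game version of dl_piece_assign_requests: request every
 * missing block of this piece that hasn't already been requested
 * from this peer, until the peer is laden or we've gone through
 * the whole piece.
 */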
static void
dl_piece_assign_requests_eg(struct piece *pc, struct peer *p)
{
    unsigned first_block = pc->next_block;
    do {
        if ((has_bit(pc->have_field, pc->next_block)
                || peer_requested(p, &pc->blocks[pc->next_block]))) {
            INCNEXTBLOCK(pc);
            continue;
        }
        struct block_request *req = btpd_calloc(1, sizeof(*req));
        req->blk = &pc->blocks[pc->next_block];
        req->p = p;
        BTPDQ_INSERT_TAIL(&pc->blocks[pc->next_block].reqs, req, blk_entry);
        pc->nreqs++;
        INCNEXTBLOCK(pc);
        peer_request(p, req);
    } while (!peer_laden(p) && pc->next_block != first_block);
}

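/*
 * End game version of dl_assign_requests: put requests on every
 * piece the peer has that still misses blocks, then reinsert the
 * touched pieces so the list stays in end game order.
 */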
void
dl_assign_requests_eg(struct peer *p)
{
    assert(!peer_laden(p));
    struct net *n = p->n;
    struct piece_tq tmp;
    BTPDQ_INIT(&tmp);

    struct piece *pc = BTPDQ_FIRST(&n->getlst);
    while (!peer_laden(p) && pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        if (peer_has(p, pc->index) && pc->nblocks != pc->ngot) {
            dl_piece_assign_requests_eg(pc, p);
            BTPDQ_REMOVE(&n->getlst, pc, entry);
            BTPDQ_INSERT_HEAD(&tmp, pc, entry);
        }
        pc = next;
    }

    pc = BTPDQ_FIRST(&tmp);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        dl_piece_insert_eg(pc);
        pc = next;
    }
}

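/*
 * End game version of dl_unassign_requests: drop all outstanding
 * requests held by this peer and reinsert the affected pieces in
 * end game order.
 */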
void
dl_unassign_requests_eg(struct peer *p)
{
    struct block_request *req;
    struct piece *pc;
    struct piece_tq tmp;
    BTPDQ_INIT(&tmp);

    while (p->nreqs_out > 0) {
        req = BTPDQ_FIRST(&p->my_reqs);
        pc = req->blk->pc;
        BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
        BTPDQ_INSERT_HEAD(&tmp, pc, entry);

        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, p_entry);
            BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
            p->nreqs_out--;
            BTPDQ_REMOVE(&req->blk->reqs, req, blk_entry);
            free(req);
            pc->nreqs--;
            while (next != NULL && next->blk->pc != pc)
                next = BTPDQ_NEXT(next, p_entry);
            req = next;
        }
    }
    assert(BTPDQ_EMPTY(&p->my_reqs));
    peer_on_no_reqs(p);

    pc = BTPDQ_FIRST(&tmp);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        dl_piece_insert_eg(pc);
        pc = next;
    }
}