A clone of btpd with my configuration changes.

#include <sys/types.h>
#include <sys/mman.h>

#include <openssl/sha.h>

#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "btpd.h"
#include "stream.h"
#include "tracker_req.h"

#define BLOCKLEN (1 << 14)      /* 16 KiB: the length of one block request. */

static void cm_on_piece(struct torrent *tp, struct piece *piece);
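
/*
 * End-game helper: request from the given peer every block of the piece
 * that we do not already have, regardless of whether the same block is
 * already outstanding at another peer.
 */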
static void
assign_piece_requests_eg(struct piece *piece, struct peer *peer)
{
    for (unsigned i = 0; i < piece->nblocks; i++) {
        if (!has_bit(piece->have_field, i)) {
            uint32_t start = i * BLOCKLEN;
            uint32_t len;
            if (i < piece->nblocks - 1)
                len = BLOCKLEN;
            else if (piece->index < peer->tp->meta.npieces - 1)
                len = peer->tp->meta.piece_length - i * BLOCKLEN;
            else {
                off_t piece_len =
                    peer->tp->meta.total_length -
                    peer->tp->meta.piece_length *
                    (peer->tp->meta.npieces - 1);
                len = piece_len - i * BLOCKLEN;
            }
            peer_request(peer, piece->index, start, len);
        }
    }
}
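
/*
 * End-game request assignment: express interest in every piece on the get
 * list that the peer has and, unless the peer is choking us, queue requests
 * for all of its missing blocks.
 */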
static void
cm_assign_requests_eg(struct peer *peer)
{
    struct piece *piece;
    BTPDQ_FOREACH(piece, &peer->tp->getlst, entry) {
        if (has_bit(peer->piece_field, piece->index)) {
            peer_want(peer, piece->index);
            if ((peer->flags & PF_P_CHOKE) == 0)
                assign_piece_requests_eg(piece, peer);
        }
    }
}
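
/*
 * End-game request teardown: drop every request made to this peer. Piece
 * bookkeeping is left untouched, since in end game blocks are requested
 * from several peers at once.
 */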
static void
cm_unassign_requests_eg(struct peer *peer)
{
    struct piece_req *req = BTPDQ_FIRST(&peer->my_reqs);
    while (req != NULL) {
        struct piece_req *next = BTPDQ_NEXT(req, entry);
        free(req);
        req = next;
    }
    BTPDQ_INIT(&peer->my_reqs);
}
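
/*
 * Switch the torrent into end-game mode and hand out end-game requests to
 * every attached peer.
 */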
static void
cm_enter_endgame(struct torrent *tp)
{
    struct peer *peer;
    btpd_log(BTPD_L_POL, "Entering end game\n");
    tp->endgame = 1;
    BTPDQ_FOREACH(peer, &tp->peers, cm_entry)
        cm_assign_requests_eg(peer);
}

static int
piece_full(struct piece *p)
{
    return p->ngot + p->nbusy == p->nblocks;
}
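
/*
 * Decide whether a new piece should be scheduled for download: only outside
 * end game, and only when every piece currently on the get list already has
 * all of its blocks received or in flight.
 */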
static int
cm_should_schedule(struct torrent *tp)
{
    if (!tp->endgame) {
        int should = 1;
        struct piece *p = BTPDQ_FIRST(&tp->getlst);
        while (p != NULL) {
            if (!piece_full(p)) {
                should = 0;
                break;
            }
            p = BTPDQ_NEXT(p, entry);
        }
        return should;
    } else
        return 0;
}
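
/*
 * Called when no remaining peer has this piece. Outside end game the
 * partial piece is dropped from the get list so that a more available
 * piece can be scheduled instead.
 */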
static void
cm_on_peerless_piece(struct torrent *tp, struct piece *piece)
{
    if (!tp->endgame) {
        assert(tp->piece_count[piece->index] == 0);
        btpd_log(BTPD_L_POL, "peerless piece %u\n", piece->index);
        msync(tp->imem, tp->isiz, MS_ASYNC);
        BTPDQ_REMOVE(&tp->getlst, piece, entry);
        free(piece);
        if (cm_should_schedule(tp))
            cm_schedule_piece(tp);
    }
}
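
/* qsort comparators ordering peers by download rate and by upload rate. */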
static int
rate_cmp(unsigned long rate1, unsigned long rate2)
{
    if (rate1 < rate2)
        return -1;
    else if (rate1 == rate2)
        return 0;
    else
        return 1;
}

static int
dwnrate_cmp(const void *p1, const void *p2)
{
    unsigned long rate1 = peer_get_rate((*(struct peer **)p1)->rate_to_me);
    unsigned long rate2 = peer_get_rate((*(struct peer **)p2)->rate_to_me);
    return rate_cmp(rate1, rate2);
}

static int
uprate_cmp(const void *p1, const void *p2)
{
    unsigned long rate1 = peer_get_rate((*(struct peer **)p1)->rate_from_me);
    unsigned long rate2 = peer_get_rate((*(struct peer **)p2)->rate_from_me);
    return rate_cmp(rate1, rate2);
}
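
/*
 * Choke algorithm: sort the peers by transfer rate (upload rate once the
 * torrent is complete, download rate otherwise), keep the optimistic
 * unchoke, and unchoke peers from fastest to slowest until four interested
 * downloaders are unchoked; choke the rest. Re-runs every 10 seconds via
 * tp->choke_time.
 */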
static void
choke_alg(struct torrent *tp)
{
    int i;
    struct peer *p;
    struct peer **psort;

    assert(tp->npeers > 0);

    psort = (struct peer **)btpd_malloc(tp->npeers * sizeof(p));
    i = 0;
    BTPDQ_FOREACH(p, &tp->peers, cm_entry)
        psort[i++] = p;

    if (tp->have_npieces == tp->meta.npieces)
        qsort(psort, tp->npeers, sizeof(p), uprate_cmp);
    else
        qsort(psort, tp->npeers, sizeof(p), dwnrate_cmp);

    tp->ndown = 0;
    if (tp->optimistic != NULL) {
        if (tp->optimistic->flags & PF_I_CHOKE)
            peer_unchoke(tp->optimistic);
        if (tp->optimistic->flags & PF_P_WANT)
            tp->ndown = 1;
    }

    for (i = tp->npeers - 1; i >= 0; i--) {
        if (psort[i] == tp->optimistic)
            continue;
        if (tp->ndown < 4) {
            if (psort[i]->flags & PF_P_WANT)
                tp->ndown++;
            if (psort[i]->flags & PF_I_CHOKE)
                peer_unchoke(psort[i]);
        } else {
            if ((psort[i]->flags & PF_I_CHOKE) == 0)
                peer_choke(psort[i]);
        }
    }

    free(psort);

    tp->choke_time = btpd.seconds + 10;
}
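
/*
 * Rotate the optimistic unchoke to the given peer, or to the next peer in
 * the list when np is NULL, then rerun the choke algorithm. The optimistic
 * slot is re-evaluated every 30 seconds via tp->opt_time.
 */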
static void
next_optimistic(struct torrent *tp, struct peer *np)
{
    if (np != NULL)
        tp->optimistic = np;
    else if (tp->optimistic == NULL)
        tp->optimistic = BTPDQ_FIRST(&tp->peers);
    else {
        np = BTPDQ_NEXT(tp->optimistic, cm_entry);
        if (np != NULL)
            tp->optimistic = np;
        else
            tp->optimistic = BTPDQ_FIRST(&tp->peers);
    }
    assert(tp->optimistic != NULL);
    choke_alg(tp);
    tp->opt_time = btpd.seconds + 30;
}
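
/*
 * Events that change whether a peer can or wants to transfer data simply
 * rerun the choke algorithm, so the set of unchoked peers stays consistent
 * with who is interested.
 */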
void
cm_on_upload(struct peer *peer)
{
    choke_alg(peer->tp);
}

void
cm_on_unupload(struct peer *peer)
{
    choke_alg(peer->tp);
}

void
cm_on_interest(struct peer *peer)
{
    if ((peer->flags & PF_I_CHOKE) == 0)
        cm_on_upload(peer);
}

void
cm_on_uninterest(struct peer *peer)
{
    if ((peer->flags & PF_I_CHOKE) == 0)
        cm_on_unupload(peer);
}
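
/*
 * Per-second driver: fire the periodic tracker request and re-run the
 * optimistic unchoke and choke algorithms when their timers expire.
 */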
void
cm_by_second(struct torrent *tp)
{
    if (btpd.seconds == tp->tracker_time)
        tracker_req(tp, TR_EMPTY);

    if (btpd.seconds == tp->opt_time)
        next_optimistic(tp, NULL);

    if (btpd.seconds == tp->choke_time)
        choke_alg(tp);
}
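
/*
 * The peer has unchoked us and we want data: hand it requests. Outside end
 * game up to five requests are assigned; note that the call sits inside
 * assert(), so it would be compiled out if NDEBUG were defined.
 */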
void
cm_on_download(struct peer *peer)
{
    if (!peer->tp->endgame)
        assert(cm_assign_requests(peer, 5) != 0);
    else
        cm_assign_requests_eg(peer);
}

void
cm_on_undownload(struct peer *peer)
{
    if (!peer->tp->endgame)
        cm_unassign_requests(peer);
    else
        cm_unassign_requests_eg(peer);
}

void
cm_on_unchoke(struct peer *peer)
{
    if ((peer->flags & PF_I_WANT) != 0)
        cm_on_download(peer);
}

void
cm_on_choke(struct peer *peer)
{
    if ((peer->flags & PF_I_WANT) != 0)
        cm_on_undownload(peer);
}
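
/*
 * A peer announced (HAVE) a piece. Update its availability count and, if
 * the piece is one we are fetching or could schedule, express interest and
 * possibly start requesting from this peer.
 */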
void
cm_on_piece_ann(struct peer *peer, uint32_t piece)
{
    struct piece *p;
    struct torrent *tp = peer->tp;

    tp->piece_count[piece]++;

    if (has_bit(tp->piece_field, piece))
        return;

    p = BTPDQ_FIRST(&tp->getlst);
    while (p != NULL && p->index != piece)
        p = BTPDQ_NEXT(p, entry);

    if (p != NULL && tp->endgame) {
        peer_want(peer, p->index);
        if ((peer->flags & PF_P_CHOKE) == 0)
            cm_on_download(peer);
    } else if (p != NULL && !piece_full(p)) {
        peer_want(peer, p->index);
        if ((peer->flags & PF_P_CHOKE) == 0 && BTPDQ_EMPTY(&peer->my_reqs))
            cm_on_download(peer);
    } else if (p == NULL && cm_should_schedule(tp))
        cm_schedule_piece(tp);
}
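
/*
 * A peer disconnected. Remove it from the torrent's peer list, pick a new
 * optimistic unchoke if needed, drop its contribution to the piece
 * availability counts, give back its outstanding requests, and drop any
 * partial piece that no remaining peer can provide.
 */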
void
cm_on_lost_peer(struct peer *peer)
{
    struct torrent *tp = peer->tp;
    struct piece *piece;

    tp->npeers--;
    peer->flags &= ~PF_ATTACHED;

    if (tp->npeers == 0) {
        BTPDQ_REMOVE(&tp->peers, peer, cm_entry);
        tp->optimistic = NULL;
        tp->choke_time = tp->opt_time = 0;
    } else if (tp->optimistic == peer) {
        struct peer *next = BTPDQ_NEXT(peer, cm_entry);
        BTPDQ_REMOVE(&tp->peers, peer, cm_entry);
        next_optimistic(peer->tp, next);
    } else if ((peer->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT) {
        BTPDQ_REMOVE(&tp->peers, peer, cm_entry);
        cm_on_unupload(peer);
    } else {
        BTPDQ_REMOVE(&tp->peers, peer, cm_entry);
    }

    for (size_t i = 0; i < peer->tp->meta.npieces; i++)
        if (has_bit(peer->piece_field, i))
            tp->piece_count[i]--;

    if ((peer->flags & (PF_I_WANT|PF_P_CHOKE)) == PF_I_WANT)
        cm_on_undownload(peer);

    piece = BTPDQ_FIRST(&tp->getlst);
    while (piece != NULL) {
        struct piece *next = BTPDQ_NEXT(piece, entry);
        if (has_bit(peer->piece_field, piece->index) &&
            tp->piece_count[piece->index] == 0)
            cm_on_peerless_piece(tp, piece);
        piece = next;
    }
}
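
/*
 * A handshaked peer is attached to the torrent. The first peer becomes the
 * optimistic unchoke; later peers are inserted either right after the
 * optimistic peer or at the tail, chosen at random, to vary the rotation
 * order.
 */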
void
cm_on_new_peer(struct peer *peer)
{
    struct torrent *tp = peer->tp;

    tp->npeers++;
    peer->flags |= PF_ATTACHED;
    BTPDQ_REMOVE(&btpd.unattached, peer, cm_entry);

    if (tp->npeers == 1) {
        BTPDQ_INSERT_HEAD(&tp->peers, peer, cm_entry);
        next_optimistic(peer->tp, peer);
    } else {
        if (random() > RAND_MAX / 3)
            BTPDQ_INSERT_AFTER(&tp->peers, tp->optimistic, peer, cm_entry);
        else
            BTPDQ_INSERT_TAIL(&tp->peers, peer, cm_entry);
    }
}

static int
missing_piece(struct torrent *tp, uint32_t index)
{
    struct piece *p;
    if (has_bit(tp->piece_field, index))
        return 0;
    BTPDQ_FOREACH(p, &tp->getlst, entry)
        if (p->index == index)
            return 0;
    return 1;
}
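
/*
 * Allocate the in-memory state for a piece about to be downloaded. The
 * block count is derived from the piece length (the last piece may be
 * shorter); down_field lives in the same allocation, while have_field
 * points into the torrent-wide block bit field (tp->block_field).
 */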
static struct piece *
alloc_piece(struct torrent *tp, uint32_t piece)
{
    struct piece *res;
    size_t mem, field;
    unsigned long nblocks;
    off_t piece_length = tp->meta.piece_length;

    if (piece == tp->meta.npieces - 1) {
        off_t totl = tp->meta.total_length;
        off_t npm1 = tp->meta.npieces - 1;
        piece_length = totl - npm1 * piece_length;
    }

    nblocks = (unsigned)ceil((double)piece_length / BLOCKLEN);
    field = (size_t)ceil(nblocks / 8.0);
    mem = sizeof(*res) + field;

    res = btpd_calloc(1, mem);
    res->down_field = (uint8_t *)res + sizeof(*res);
    res->have_field =
        tp->block_field +
        (size_t)ceil(piece * tp->meta.piece_length / (double)(1 << 17));

    res->nblocks = nblocks;
    res->index = piece;

    for (unsigned i = 0; i < nblocks; i++)
        if (has_bit(res->have_field, i))
            res->ngot++;

    return res;
}
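
/*
 * Tell every peer that has this piece that we want it, and assign requests
 * to idle, unchoked peers until all of the piece's blocks are in flight.
 */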
static void
activate_piece_peers(struct torrent *tp, struct piece *piece)
{
    struct peer *peer;
    assert(!piece_full(piece) && tp->endgame == 0);
    BTPDQ_FOREACH(peer, &tp->peers, cm_entry)
        if (has_bit(peer->piece_field, piece->index))
            peer_want(peer, piece->index);
    peer = BTPDQ_FIRST(&tp->peers);
    while (peer != NULL && !piece_full(piece)) {
        if ((peer->flags & (PF_P_CHOKE|PF_I_WANT)) == PF_I_WANT &&
            BTPDQ_EMPTY(&peer->my_reqs)) {
            //
            cm_on_download(peer);
        }
        peer = BTPDQ_NEXT(peer, cm_entry);
    }
}
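
/*
 * Rarest-first piece selection: find the lowest availability among the
 * pieces we still miss, pick uniformly at random among the pieces tied at
 * that count, put the new piece on the get list and activate peers for it.
 * If no piece is missing outside the get list, enter end game.
 */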
void
cm_schedule_piece(struct torrent *tp)
{
    uint32_t i;
    uint32_t min_i;
    unsigned min_c;
    struct piece *piece;
    int enter_end_game = 1;

    assert(tp->endgame == 0);

    for (i = 0; i < tp->meta.npieces; i++)
        if (missing_piece(tp, i)) {
            enter_end_game = 0;
            if (tp->piece_count[i] > 0)
                break;
        }

    if (i == tp->meta.npieces) {
        if (enter_end_game)
            cm_enter_endgame(tp);
        return;
    }

    min_i = i;
    min_c = 1;
    for (i++; i < tp->meta.npieces; i++) {
        if (missing_piece(tp, i) && tp->piece_count[i] > 0) {
            if (tp->piece_count[i] == tp->piece_count[min_i])
                min_c++;
            else if (tp->piece_count[i] < tp->piece_count[min_i]) {
                min_i = i;
                min_c = 1;
            }
        }
    }
    if (min_c > 1) {
        min_c = 1 + rint((double)random() * (min_c - 1) / RAND_MAX);
        for (i = min_i; min_c > 0; i++) {
            if (missing_piece(tp, i) &&
                tp->piece_count[i] == tp->piece_count[min_i]) {
                //
                min_c--;
                min_i = i;
            }
        }
    }

    btpd_log(BTPD_L_POL, "scheduled piece: %u.\n", min_i);

    piece = alloc_piece(tp, min_i);
    BTPDQ_INSERT_HEAD(&tp->getlst, piece, entry);

    if (piece->ngot == piece->nblocks) {
        cm_on_piece(tp, piece);
        if (cm_should_schedule(tp))
            cm_schedule_piece(tp);
    } else
        activate_piece_peers(tp, piece);
}

static void
cm_on_piece_unfull(struct torrent *tp, struct piece *piece)
{
    activate_piece_peers(tp, piece);
}

static void
cm_on_piece_full(struct torrent *tp, struct piece *piece)
{
    struct peer *p;
    if (cm_should_schedule(tp))
        cm_schedule_piece(tp);
    BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
        if (has_bit(p->piece_field, piece->index))
            peer_unwant(p, piece->index);
    }
}
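
/*
 * Assign one block request to the peer: walk the get list for a piece the
 * peer has that still needs blocks, pick the first block that is neither
 * received nor in flight, compute its length (the last block of the last
 * piece may be short) and send the request. Returns 0 when nothing could
 * be assigned.
 */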
static int
cm_assign_request(struct peer *peer)
{
    struct piece *piece;
    unsigned i;
    uint32_t start, len;

    piece = BTPDQ_FIRST(&peer->tp->getlst);
    while (piece != NULL) {
        if (!piece_full(piece) && has_bit(peer->piece_field, piece->index))
            break;
        piece = BTPDQ_NEXT(piece, entry);
    }

    if (piece == NULL)
        return 0;

    i = 0;
    while (has_bit(piece->have_field, i) || has_bit(piece->down_field, i))
        i++;

    start = i * BLOCKLEN;

    if (i < piece->nblocks - 1)
        len = BLOCKLEN;
    else if (piece->index < peer->tp->meta.npieces - 1)
        len = peer->tp->meta.piece_length - i * BLOCKLEN;
    else {
        off_t piece_len =
            peer->tp->meta.total_length -
            peer->tp->meta.piece_length * (peer->tp->meta.npieces - 1);
        len = piece_len - i * BLOCKLEN;
    }

    peer_request(peer, piece->index, start, len);

    set_bit(piece->down_field, i);
    piece->nbusy++;
    if (piece_full(piece))
        cm_on_piece_full(peer->tp, piece);

    return 1;
}

int
cm_assign_requests(struct peer *peer, int nreqs)
{
    int onreqs = nreqs;
    while (nreqs > 0 && cm_assign_request(peer))
        nreqs--;
    return onreqs - nreqs;
}
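
/*
 * Give back all of this peer's outstanding requests: clear the in-flight
 * bits and busy counters on the affected pieces, and reactivate pieces
 * that drop from "full" back to needing requests.
 */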
void
cm_unassign_requests(struct peer *peer)
{
    struct torrent *tp = peer->tp;
    struct piece *piece = BTPDQ_FIRST(&tp->getlst);

    while (piece != NULL) {
        int was_full = piece_full(piece);

        struct piece_req *req = BTPDQ_FIRST(&peer->my_reqs);
        while (req != NULL) {
            struct piece_req *next = BTPDQ_NEXT(req, entry);

            if (piece->index == req->index) {
                assert(has_bit(piece->down_field, req->begin / BLOCKLEN));
                clear_bit(piece->down_field, req->begin / BLOCKLEN);
                piece->nbusy--;
                BTPDQ_REMOVE(&peer->my_reqs, req, entry);
                free(req);
            }

            req = next;
        }

        if (was_full && !piece_full(piece))
            cm_on_piece_unfull(tp, piece);

        piece = BTPDQ_NEXT(piece, entry);
    }

    assert(BTPDQ_EMPTY(&peer->my_reqs));
}
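
/*
 * Compare a computed piece hash with the expected one, reading the expected
 * digest from the torrent file on disk when the piece hashes are not kept
 * in memory. Returns 0 on a match, like memcmp.
 */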
static int
test_hash(struct torrent *tp, uint8_t *hash, unsigned long index)
{
    if (tp->meta.piece_hash != NULL)
        return memcmp(hash, tp->meta.piece_hash[index], SHA_DIGEST_LENGTH);
    else {
        char piece_hash[SHA_DIGEST_LENGTH];
        int fd;
        int bufi;
        int err;

        err = vopen(&fd, O_RDONLY, "%s", tp->relpath);
        if (err != 0)
            btpd_err("test_hash: %s\n", strerror(err));

        err = lseek(fd, tp->meta.pieces_off + index * SHA_DIGEST_LENGTH,
            SEEK_SET);
        if (err < 0)
            btpd_err("test_hash: %s\n", strerror(errno));

        bufi = 0;
        while (bufi < SHA_DIGEST_LENGTH) {
            ssize_t nread =
                read(fd, piece_hash + bufi, SHA_DIGEST_LENGTH - bufi);
            bufi += nread;
        }
        close(fd);

        return memcmp(hash, piece_hash, SHA_DIGEST_LENGTH);
    }
}

static int
ro_fd_cb(const char *path, int *fd, void *arg)
{
    struct torrent *tp = arg;
    return vopen(fd, O_RDONLY, "%s.d/%s", tp->relpath, path);
}
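
/*
 * A piece has all of its blocks: hash the data on disk and verify it. On
 * success the piece is marked as owned, peers are notified with HAVE and
 * the piece is released; on a hash failure the blocks are cleared and
 * either re-requested (end game) or the piece is dropped and rescheduled.
 */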
static void
cm_on_piece(struct torrent *tp, struct piece *piece)
{
    int err;
    uint8_t hash[20];
    struct bt_stream_ro *bts;
    off_t plen = tp->meta.piece_length;

    if (piece->index == tp->meta.npieces - 1) {
        plen =
            tp->meta.total_length -
            tp->meta.piece_length * (tp->meta.npieces - 1);
    }
    if ((bts = bts_open_ro(&tp->meta, piece->index * tp->meta.piece_length,
             ro_fd_cb, tp)) == NULL)
        btpd_err("Out of memory.\n");

    if ((err = bts_sha(bts, plen, hash)) != 0)
        btpd_err("Ouch! %s\n", strerror(err));

    bts_close_ro(bts);

    if (test_hash(tp, hash, piece->index) == 0) {
        btpd_log(BTPD_L_POL, "Got piece: %u.\n", piece->index);
        struct peer *p;
        set_bit(tp->piece_field, piece->index);
        tp->have_npieces++;
        if (tp->have_npieces == tp->meta.npieces) {
            btpd_log(BTPD_L_BTPD, "Finished: %s.\n", tp->relpath);
            tracker_req(tp, TR_COMPLETED);
        }
        msync(tp->imem, tp->isiz, MS_ASYNC);
        BTPDQ_FOREACH(p, &tp->peers, cm_entry)
            peer_have(p, piece->index);
        if (tp->endgame)
            BTPDQ_FOREACH(p, &tp->peers, cm_entry)
                peer_unwant(p, piece->index);
        BTPDQ_REMOVE(&tp->getlst, piece, entry);
        free(piece);
    } else if (tp->endgame) {
        struct peer *p;
        btpd_log(BTPD_L_ERROR, "Bad hash for piece %u of %s.\n",
            piece->index, tp->relpath);
        for (unsigned i = 0; i < piece->nblocks; i++)
            clear_bit(piece->have_field, i);
        piece->ngot = 0;
        BTPDQ_FOREACH(p, &tp->peers, cm_entry)
            if (has_bit(p->piece_field, piece->index) &&
                (p->flags & PF_P_CHOKE) == 0) {
                //
                assign_piece_requests_eg(piece, p);
            }
    } else {
        btpd_log(BTPD_L_ERROR, "Bad hash for piece %u of %s.\n",
            piece->index, tp->relpath);
        for (unsigned i = 0; i < piece->nblocks; i++) {
            clear_bit(piece->have_field, i);
            assert(!has_bit(piece->down_field, i));
        }
        msync(tp->imem, tp->isiz, MS_ASYNC);
        BTPDQ_REMOVE(&tp->getlst, piece, entry);
        free(piece);
        if (cm_should_schedule(tp))
            cm_schedule_piece(tp);
    }
}
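
/*
 * A requested block arrived. Mark it received, retire the request and, in
 * end game, cancel the same block at the other peers it was requested from.
 * A completed piece is handed to cm_on_piece(); outside end game the peer
 * is immediately given a replacement request.
 */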
void
cm_on_block(struct peer *peer)
{
    struct torrent *tp = peer->tp;
    struct piece_req *req = BTPDQ_FIRST(&peer->my_reqs);
    struct piece *piece = BTPDQ_FIRST(&tp->getlst);
    unsigned block = req->begin / BLOCKLEN;

    while (piece != NULL && piece->index != req->index)
        piece = BTPDQ_NEXT(piece, entry);

    set_bit(piece->have_field, block);
    clear_bit(piece->down_field, block);
    piece->ngot++;
    piece->nbusy--;

    if (tp->endgame) {
        uint32_t index = req->index;
        uint32_t begin = req->begin;
        uint32_t length = req->length;
        struct peer *p;

        BTPDQ_REMOVE(&peer->my_reqs, req, entry);
        free(req);

        BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
            if (has_bit(p->piece_field, index) &&
                (peer->flags & PF_P_CHOKE) == 0)
                peer_cancel(p, index, begin, length);
        }
        if (piece->ngot == piece->nblocks)
            cm_on_piece(tp, piece);
    } else {
        BTPDQ_REMOVE(&peer->my_reqs, req, entry);
        free(req);
        if (piece->ngot == piece->nblocks)
            cm_on_piece(tp, piece);
        if ((peer->flags & (PF_I_WANT|PF_P_CHOKE)) == PF_I_WANT)
            cm_assign_requests(peer, 1);
    }
}