A clone of btpd with my configuration changes.

/*
 * The commandments:
 *
 * A peer is wanted except when it only has pieces we've already
 * downloaded or fully requested. Thus, a peer's wanted count is
 * increased for each missing or unfull piece it announces, or
 * when a piece it has becomes unfull.
 *
 * When a peer we want unchokes us, requests will primarily
 * be put on pieces we're already downloading and then on
 * possible new pieces.
 *
 * When choosing between several different new pieces to start
 * downloading, the rarest piece will be chosen.
 *
 * End game mode sets in when all missing blocks are requested.
 * In end game mode no piece is counted as full unless it's
 * downloaded.
 *
 */

#include <fcntl.h>
#include <math.h>
#include <string.h>
#include <unistd.h>

#include <openssl/sha.h>

#include "btpd.h"
#include "stream.h"

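/*
 * Allocate tracking state for a piece we're starting to download.
 * The piece struct and its down_field bitfield share one allocation;
 * the bitfield needs one bit per block, i.e. ceil(nblocks / 8) bytes
 * (e.g. 20 blocks -> 3 bytes). Blocks already saved to disk, as
 * reported by cm_get_block_field, are counted into ngot.
 */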
static struct piece *
piece_alloc(struct net *n, uint32_t index)
{
    assert(!has_bit(n->busy_field, index)
        && n->npcs_busy < n->tp->npieces);
    struct piece *pc;
    size_t mem, field;
    unsigned nblocks;

    nblocks = torrent_piece_blocks(n->tp, index);
    field = (size_t)ceil(nblocks / 8.0);
    mem = sizeof(*pc) + field;

    pc = btpd_calloc(1, mem);
    pc->n = n;
    pc->down_field = (uint8_t *)(pc + 1);
    pc->have_field = cm_get_block_field(n->tp, index);

    pc->index = index;
    pc->nblocks = nblocks;

    pc->nreqs = 0;
    pc->next_block = 0;

    for (unsigned i = 0; i < nblocks; i++)
        if (has_bit(pc->have_field, i))
            pc->ngot++;
    assert(pc->ngot < pc->nblocks);

    BTPDQ_INIT(&pc->reqs);

    n->npcs_busy++;
    set_bit(n->busy_field, index);
    BTPDQ_INSERT_HEAD(&n->getlst, pc, entry);
    return pc;
}

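/*
 * Free a busy piece: drop all outstanding requests on it and, if
 * end game buffers were allocated, the cached request messages in
 * eg_reqs.
 */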
void
piece_free(struct piece *pc)
{
    struct net *n = pc->n;
    struct block_request *req, *next;
    assert(n->npcs_busy > 0);
    n->npcs_busy--;
    clear_bit(n->busy_field, pc->index);
    BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
    BTPDQ_FOREACH_MUTABLE(req, &pc->reqs, blk_entry, next) {
        nb_drop(req->msg);
        free(req);
    }
    if (pc->eg_reqs != NULL) {
        for (uint32_t i = 0; i < pc->nblocks; i++)
            if (pc->eg_reqs[i] != NULL)
                nb_drop(pc->eg_reqs[i]);
        free(pc->eg_reqs);
    }
    free(pc);
}

int
piece_full(struct piece *pc)
{
    return pc->ngot + pc->nbusy == pc->nblocks;
}

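/*
 * End game should begin when every piece is either downloaded or
 * busy, and every busy piece already has all its blocks requested.
 */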
static int
dl_should_enter_endgame(struct net *n)
{
    int should;
    if (cm_pieces(n->tp) + n->npcs_busy == n->tp->npieces) {
        should = 1;
        struct piece *pc;
        BTPDQ_FOREACH(pc, &n->getlst, entry) {
            if (!piece_full(pc)) {
                should = 0;
                break;
            }
        }
    } else
        should = 0;
    return should;
}

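/*
 * Insert a piece into the get list for end game mode. The list is
 * kept ordered by requests per missing block (nreqs / (nblocks - ngot)),
 * least requested first; pieces with no missing blocks go last.
 */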
static void
dl_piece_insert_eg(struct piece *pc)
{
    struct piece_tq *getlst = &pc->n->getlst;
    if (pc->nblocks == pc->ngot)
        BTPDQ_INSERT_TAIL(getlst, pc, entry);
    else {
        unsigned r = pc->nreqs / (pc->nblocks - pc->ngot);
        struct piece *it;
        BTPDQ_FOREACH(it, getlst, entry) {
            if ((it->nblocks == it->ngot
                    || r < it->nreqs / (it->nblocks - it->ngot))) {
                BTPDQ_INSERT_BEFORE(it, pc, entry);
                break;
            }
        }
        if (it == NULL)
            BTPDQ_INSERT_TAIL(getlst, pc, entry);
    }
}

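/*
 * Re-sort a piece in the end game get list after its request or
 * block counts have changed.
 */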
void
dl_piece_reorder_eg(struct piece *pc)
{
    BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
    dl_piece_insert_eg(pc);
}

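/*
 * Switch to end game mode. The down_field busy bits are cleared
 * since blocks may now be requested from more than one peer, one
 * request message per requested block is cached in eg_reqs for
 * reuse, the get list is re-sorted by request pressure, and every
 * peer that can take requests gets new ones assigned.
 */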
static void
dl_enter_endgame(struct net *n)
{
    struct peer *p;
    struct piece *pc;
    struct piece *pcs[n->npcs_busy];
    unsigned pi;

    btpd_log(BTPD_L_POL, "Entering end game\n");
    n->endgame = 1;
    pi = 0;
    BTPDQ_FOREACH(pc, &n->getlst, entry) {
        struct block_request *req;
        for (unsigned i = 0; i < pc->nblocks; i++)
            clear_bit(pc->down_field, i);
        pc->nbusy = 0;
        pc->eg_reqs = btpd_calloc(pc->nblocks, sizeof(struct net_buf *));
        BTPDQ_FOREACH(req, &pc->reqs, blk_entry) {
            uint32_t blki = nb_get_begin(req->msg) / PIECE_BLOCKLEN;
            if (pc->eg_reqs[blki] == NULL) {
                pc->eg_reqs[blki] = req->msg;
                nb_hold(req->msg);
            }
        }
        pcs[pi] = pc;
        pi++;
    }
    BTPDQ_INIT(&n->getlst);
    while (pi > 0) {
        pi--;
        dl_piece_insert_eg(pcs[pi]);
    }
    BTPDQ_FOREACH(p, &n->peers, p_entry) {
        assert(p->nwant == 0);
        BTPDQ_FOREACH(pc, &n->getlst, entry) {
            if (peer_has(p, pc->index))
                peer_want(p, pc->index);
        }
        if (p->nwant > 0 && peer_leech_ok(p) && !peer_laden(p))
            dl_assign_requests_eg(p);
    }
}

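/*
 * Find the busy piece with the given index, or return NULL if it
 * isn't on the get list.
 */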
struct piece *
dl_find_piece(struct net *n, uint32_t index)
{
    struct piece *pc;
    BTPDQ_FOREACH(pc, &n->getlst, entry)
        if (pc->index == index)
            break;
    return pc;
}

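/*
 * A piece is startable for a peer if the peer has it, we don't
 * have it yet, and it isn't already allocated for download.
 */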
static int
dl_piece_startable(struct peer *p, uint32_t index)
{
    return peer_has(p, index) && !cm_has_piece(p->n->tp, index)
        && !has_bit(p->n->busy_field, index);
}

/*
 * Find the rarest piece the peer has that isn't already allocated
 * for download or already downloaded. Ties between equally rare
 * startable pieces are broken by picking one of them at random.
 * If no such piece can be found, return ENOENT.
 *
 * Return 0 or ENOENT, index in res.
 */
static int
dl_choose_rarest(struct peer *p, uint32_t *res)
{
    uint32_t i;
    struct net *n = p->n;

    assert(n->endgame == 0);

    for (i = 0; i < n->tp->npieces && !dl_piece_startable(p, i); i++)
        ;

    if (i == n->tp->npieces)
        return ENOENT;

    uint32_t min_i = i;
    uint32_t min_c = 1;
    for (i++; i < n->tp->npieces; i++) {
        if (dl_piece_startable(p, i)) {
            if (n->piece_count[i] == n->piece_count[min_i])
                min_c++;
            else if (n->piece_count[i] < n->piece_count[min_i]) {
                min_i = i;
                min_c = 1;
            }
        }
    }
    if (min_c > 1) {
        min_c = rand_between(1, min_c);
        for (i = min_i; min_c > 0; i++) {
            if (dl_piece_startable(p, i)
                    && n->piece_count[i] == n->piece_count[min_i]) {
                min_c--;
                min_i = i;
            }
        }
    }
    *res = min_i;
    return 0;
}

/*
 * Called from dl_piece_assign_requests when a piece becomes full.
 * The wanted level of the peers that have this piece will be decreased.
 * This function is the only one that may trigger end game.
 */
static void
dl_on_piece_full(struct piece *pc)
{
    struct peer *p;
    BTPDQ_FOREACH(p, &pc->n->peers, p_entry) {
        if (peer_has(p, pc->index))
            peer_unwant(p, pc->index);
    }
    if (dl_should_enter_endgame(pc->n))
        dl_enter_endgame(pc->n);
}

/*
 * Allocate the piece indicated by the index for download.
 * There's a small possibility that a piece is fully downloaded
 * but hasn't been tested yet. If that is the case the piece will
 * be tested and NULL will be returned. Also, we might then enter
 * end game.
 *
 * Return the piece or NULL.
 */
struct piece *
dl_new_piece(struct net *n, uint32_t index)
{
    btpd_log(BTPD_L_POL, "Started on piece %u.\n", index);
    cm_prealloc(n->tp, index);
    return piece_alloc(n, index);
}

/*
 * Called when a previously full piece loses a peer.
 * This is needed because we have decreased the wanted
 * level for the peers that have this piece when it got
 * full. Thus we have to increase the wanted level and
 * try to assign requests for this piece.
 */
void
dl_on_piece_unfull(struct piece *pc)
{
    struct net *n = pc->n;
    struct peer *p;
    assert(!piece_full(pc) && n->endgame == 0);
    BTPDQ_FOREACH(p, &n->peers, p_entry)
        if (peer_has(p, pc->index))
            peer_want(p, pc->index);
    p = BTPDQ_FIRST(&n->peers);
    while (p != NULL && !piece_full(pc)) {
        if (peer_leech_ok(p) && !peer_laden(p))
            dl_piece_assign_requests(pc, p); // Cannot provoke end game here.
        p = BTPDQ_NEXT(p, p_entry);
    }
}

#define INCNEXTBLOCK(pc) \
    (pc)->next_block = ((pc)->next_block + 1) % (pc)->nblocks

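/*
 * Register a request for the piece's next block and send it to the
 * peer. If msg is NULL a fresh request message is created; in end
 * game a cached one may be passed in and reused. Outside end game
 * the block is also marked busy in down_field.
 */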
static struct block_request *
dl_new_request(struct peer *p, struct piece *pc, struct net_buf *msg)
{
    if (msg == NULL) {
        uint32_t block = pc->next_block;
        uint32_t start = block * PIECE_BLOCKLEN;
        uint32_t length =
            torrent_block_size(pc->n->tp, pc->index, pc->nblocks, block);
        msg = nb_create_request(pc->index, start, length);
    }
    struct block_request *req = btpd_malloc(sizeof(*req));
    req->p = p;
    req->msg = msg;
    nb_hold(req->msg);
    BTPDQ_INSERT_TAIL(&pc->reqs, req, blk_entry);
    pc->nreqs++;
    if (!pc->n->endgame) {
        set_bit(pc->down_field, pc->next_block);
        pc->nbusy++;
    }
    peer_request(p, req);
    return req;
}

/*
 * Request as many blocks as possible on this piece from
 * the peer. If the piece becomes full we call dl_on_piece_full.
 *
 * Return the number of requests sent.
 */
unsigned
dl_piece_assign_requests(struct piece *pc, struct peer *p)
{
    assert(!piece_full(pc) && !peer_laden(p));
    unsigned count = 0;
    do {
        while ((has_bit(pc->have_field, pc->next_block)
                   || has_bit(pc->down_field, pc->next_block)))
            INCNEXTBLOCK(pc);
        dl_new_request(p, pc, NULL);
        INCNEXTBLOCK(pc);
        count++;
    } while (!piece_full(pc) && !peer_laden(p));
    if (piece_full(pc))
        dl_on_piece_full(pc);
    return count;
}

/*
 * Request as many blocks as possible from the peer. Puts
 * requests on already active pieces before starting on new
 * ones. Care must be taken since end game mode may be triggered
 * by the calls to dl_piece_assign_requests.
 *
 * Returns the number of requests sent.
 *
 * XXX: should do something smart when deciding on which
 * already started piece to put requests on.
 */
unsigned
dl_assign_requests(struct peer *p)
{
    assert(!p->n->endgame && !peer_laden(p));
    struct piece *pc;
    struct net *n = p->n;
    unsigned count = 0;
    BTPDQ_FOREACH(pc, &n->getlst, entry) {
        if (piece_full(pc) || !peer_has(p, pc->index))
            continue;
        count += dl_piece_assign_requests(pc, p);
        if (n->endgame)
            break;
        if (!piece_full(pc))
            assert(peer_laden(p));
        if (peer_laden(p))
            break;
    }
    while (!peer_laden(p) && !n->endgame) {
        uint32_t index;
        if (dl_choose_rarest(p, &index) == 0) {
            pc = dl_new_piece(n, index);
            if (pc != NULL)
                count += dl_piece_assign_requests(pc, p);
        } else
            break;
    }
    return count;
}

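/*
 * Drop all outstanding requests held by this peer (non end game
 * version). The affected blocks become non-busy again, and any
 * piece that stops being full is reannounced via dl_on_piece_unfull.
 */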
void
dl_unassign_requests(struct peer *p)
{
    while (p->nreqs_out > 0) {
        struct block_request *req = BTPDQ_FIRST(&p->my_reqs);
        struct piece *pc = dl_find_piece(p->n, nb_get_index(req->msg));
        int was_full = piece_full(pc);

        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, p_entry);
            uint32_t blki = nb_get_begin(req->msg) / PIECE_BLOCKLEN;
            // XXX: Needs to be looked at if we introduce snubbing.
            assert(has_bit(pc->down_field, blki));
            clear_bit(pc->down_field, blki);
            pc->nbusy--;
            BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
            p->nreqs_out--;
            BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
            nb_drop(req->msg);
            free(req);
            pc->nreqs--;

            while (next != NULL && nb_get_index(next->msg) != pc->index)
                next = BTPDQ_NEXT(next, p_entry);
            req = next;
        }

        if (p->nreqs_out == 0)
            peer_on_no_reqs(p);

        if (was_full && !piece_full(pc))
            dl_on_piece_unfull(pc);
    }
    assert(BTPDQ_EMPTY(&p->my_reqs));
}

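/*
 * End game version of dl_piece_assign_requests. Walk the piece's
 * blocks at most once and request every block we lack that this
 * peer hasn't been asked for yet, reusing cached request messages
 * when available.
 */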
static void
dl_piece_assign_requests_eg(struct piece *pc, struct peer *p)
{
    unsigned first_block = pc->next_block;
    do {
        if ((has_bit(pc->have_field, pc->next_block)
                || peer_requested(p, pc->index, pc->next_block))) {
            INCNEXTBLOCK(pc);
            continue;
        }
        struct block_request *req =
            dl_new_request(p, pc, pc->eg_reqs[pc->next_block]);
        if (pc->eg_reqs[pc->next_block] == NULL) {
            pc->eg_reqs[pc->next_block] = req->msg;
            nb_hold(req->msg);
        }
        INCNEXTBLOCK(pc);
    } while (!peer_laden(p) && pc->next_block != first_block);
}

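/*
 * End game version of dl_assign_requests. Put requests on every
 * piece the peer has that still has missing blocks, then reinsert
 * the touched pieces so the get list stays sorted.
 */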
void
dl_assign_requests_eg(struct peer *p)
{
    assert(!peer_laden(p));
    struct net *n = p->n;
    struct piece_tq tmp;
    BTPDQ_INIT(&tmp);

    struct piece *pc = BTPDQ_FIRST(&n->getlst);
    while (!peer_laden(p) && pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        if (peer_has(p, pc->index) && pc->nblocks != pc->ngot) {
            dl_piece_assign_requests_eg(pc, p);
            BTPDQ_REMOVE(&n->getlst, pc, entry);
            BTPDQ_INSERT_HEAD(&tmp, pc, entry);
        }
        pc = next;
    }

    pc = BTPDQ_FIRST(&tmp);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        dl_piece_insert_eg(pc);
        pc = next;
    }
}

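/*
 * End game version of dl_unassign_requests. Drop all of the peer's
 * outstanding requests and reinsert the affected pieces to keep
 * the get list sorted.
 */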
void
dl_unassign_requests_eg(struct peer *p)
{
    struct block_request *req;
    struct piece *pc;
    struct piece_tq tmp;
    BTPDQ_INIT(&tmp);

    while (p->nreqs_out > 0) {
        req = BTPDQ_FIRST(&p->my_reqs);
        pc = dl_find_piece(p->n, nb_get_index(req->msg));

        BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
        BTPDQ_INSERT_HEAD(&tmp, pc, entry);

        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, p_entry);
            BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
            p->nreqs_out--;
            BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
            nb_drop(req->msg);
            free(req);
            pc->nreqs--;

            while (next != NULL && nb_get_index(next->msg) != pc->index)
                next = BTPDQ_NEXT(next, p_entry);
            req = next;
        }
    }
    assert(BTPDQ_EMPTY(&p->my_reqs));
    peer_on_no_reqs(p);

    pc = BTPDQ_FIRST(&tmp);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        dl_piece_insert_eg(pc);
        pc = next;
    }
}