A clone of btpd with my configuration changes.

715 lines
17 KiB

#include "btpd.h"
#include <ctype.h>

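/*
 * Meta peers are reference counted. mp_hold/mp_drop adjust the
 * count; when it reaches zero the meta peer is removed from the
 * net's meta peer table (if attached) and freed.
 */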
struct meta_peer *
mp_create(void)
{
    return btpd_calloc(1, sizeof(struct meta_peer));
}

void
mp_kill(struct meta_peer *mp)
{
    free(mp);
}

void
mp_hold(struct meta_peer *mp)
{
    mp->refs++;
}

void
mp_drop(struct meta_peer *mp, struct net *n)
{
    assert(mp->refs > 0);
    mp->refs--;
    if (mp->refs == 0) {
        assert(mp->p == NULL);
        if (mp->flags & PF_ATTACHED)
            assert(mptbl_remove(n->mptbl, mp->id) == mp);
        mp_kill(mp);
    }
}

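/*
 * Disconnect and free a peer: unlink it from all queues, cancel its
 * IO event, close the socket, drop every queued outgoing buffer and
 * release its reference on the meta peer.
 */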
void
peer_kill(struct peer *p)
{
    struct nb_link *nl;

    btpd_log(BTPD_L_CONN, "killed peer %p\n", p);

    if (p->mp->flags & PF_ATTACHED) {
        BTPDQ_REMOVE(&p->n->peers, p, p_entry);
        p->n->npeers--;
        if (p->n->active) {
            ul_on_lost_peer(p);
            dl_on_lost_peer(p);
        }
    } else
        BTPDQ_REMOVE(&net_unattached, p, p_entry);
    if (p->mp->flags & PF_ON_READQ)
        BTPDQ_REMOVE(&net_bw_readq, p, rq_entry);
    if (p->mp->flags & PF_ON_WRITEQ)
        BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);

    btpd_ev_del(&p->ioev);
    close(p->sd);

    nl = BTPDQ_FIRST(&p->outq);
    while (nl != NULL) {
        struct nb_link *next = BTPDQ_NEXT(nl, entry);
        nb_drop(nl->nb);
        free(nl);
        nl = next;
    }

    p->mp->p = NULL;
    mp_drop(p->mp, p->n);
    if (p->in.buf != NULL)
        free(p->in.buf);
    if (p->piece_field != NULL)
        free(p->piece_field);
    if (p->bad_field != NULL)
        free(p->bad_field);
    free(p);
    net_npeers--;
}

void
peer_set_in_state(struct peer *p, enum input_state state, size_t size)
{
    p->in.state = state;
    p->in.st_bytes = size;
}

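/*
 * Queue a buffer for sending. EV_WRITE is enabled only on the
 * transition from an empty to a non-empty out queue, so the event
 * loop stays quiet while there is nothing to write.
 */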
void
peer_send(struct peer *p, struct net_buf *nb)
{
    struct nb_link *nl = btpd_calloc(1, sizeof(*nl));
    nl->nb = nb;
    nb_hold(nb);

    if (BTPDQ_EMPTY(&p->outq)) {
        assert(p->outq_off == 0);
        btpd_ev_enable(&p->ioev, EV_WRITE);
        p->t_wantwrite = btpd_seconds;
    }
    BTPDQ_INSERT_TAIL(&p->outq, nl, entry);
}

/*
 * Remove a network buffer from the peer's outq.
 * If part of the buffer has already been written
 * to the network it cannot be removed.
 *
 * Returns 1 if the buffer is removed, 0 if not.
 */
int
peer_unsend(struct peer *p, struct nb_link *nl)
{
    if (!(nl == BTPDQ_FIRST(&p->outq) && p->outq_off > 0)) {
        BTPDQ_REMOVE(&p->outq, nl, entry);
        if (nl->nb->type == NB_TORRENTDATA) {
            assert(p->npiece_msgs > 0);
            p->npiece_msgs--;
        }
        nb_drop(nl->nb);
        free(nl);
        if (BTPDQ_EMPTY(&p->outq)) {
            if (p->mp->flags & PF_ON_WRITEQ) {
                BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
                p->mp->flags &= ~PF_ON_WRITEQ;
            } else
                btpd_ev_disable(&p->ioev, EV_WRITE);
        }
        return 1;
    } else
        return 0;
}

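/*
 * Called when a buffer has been written out in full. Mostly logging,
 * but note that NB_UNCHOKE clears PF_NO_REQUESTS and NB_TORRENTDATA
 * decrements the outstanding piece message counter.
 */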
void
peer_sent(struct peer *p, struct net_buf *nb)
{
    switch (nb->type) {
    case NB_KEEPALIVE:
        btpd_log(BTPD_L_MSG, "sent keepalive to %p\n", p);
        break;
    case NB_CHOKE:
        btpd_log(BTPD_L_MSG, "sent choke to %p\n", p);
        break;
    case NB_UNCHOKE:
        btpd_log(BTPD_L_MSG, "sent unchoke to %p\n", p);
        p->mp->flags &= ~PF_NO_REQUESTS;
        break;
    case NB_INTEREST:
        btpd_log(BTPD_L_MSG, "sent interest to %p\n", p);
        break;
    case NB_UNINTEREST:
        btpd_log(BTPD_L_MSG, "sent uninterest to %p\n", p);
        break;
    case NB_HAVE:
        btpd_log(BTPD_L_MSG, "sent have(%u) to %p\n",
            nb_get_index(nb), p);
        break;
    case NB_BITFIELD:
        btpd_log(BTPD_L_MSG, "sent bitfield to %p\n", p);
        break;
    case NB_REQUEST:
        btpd_log(BTPD_L_MSG, "sent request(%u,%u,%u) to %p\n",
            nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
        break;
    case NB_PIECE:
        btpd_log(BTPD_L_MSG, "sent piece(%u,%u,%u) to %p\n",
            nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
        break;
    case NB_CANCEL:
        btpd_log(BTPD_L_MSG, "sent cancel(%u,%u,%u) to %p\n",
            nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
        break;
    case NB_TORRENTDATA:
        btpd_log(BTPD_L_MSG, "sent data to %p\n", p);
        assert(p->npiece_msgs > 0);
        p->npiece_msgs--;
        break;
    case NB_MULTIHAVE:
        btpd_log(BTPD_L_MSG, "sent multihave to %p\n", p);
        break;
    case NB_BITDATA:
        btpd_log(BTPD_L_MSG, "sent bitdata to %p\n", p);
        break;
    case NB_SHAKE:
        btpd_log(BTPD_L_MSG, "sent shake to %p\n", p);
        break;
    }
}

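/* Track an outgoing block request and queue its request message. */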
void
peer_request(struct peer *p, struct block_request *req)
{
    assert(p->nreqs_out < MAXPIPEDREQUESTS);
    p->nreqs_out++;
    BTPDQ_INSERT_TAIL(&p->my_reqs, req, p_entry);
    peer_send(p, req->msg);
}

int
peer_requested(struct peer *p, uint32_t piece, uint32_t block)
{
    uint32_t begin = block * PIECE_BLOCKLEN;
    struct block_request *req;
    BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
        if (nb_get_index(req->msg) == piece && nb_get_begin(req->msg) == begin)
            return 1;
    return 0;
}

void
peer_keepalive(struct peer *p)
{
    peer_send(p, nb_create_keepalive());
}

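/*
 * Cancel an outstanding block request. If the request message is
 * still unsent it is withdrawn from the out queue; otherwise the
 * supplied cancel message is sent.
 */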
void
peer_cancel(struct peer *p, struct block_request *req, struct net_buf *nb)
{
    BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
    p->nreqs_out--;

    int removed = 0;
    struct nb_link *nl;
    BTPDQ_FOREACH(nl, &p->outq, entry) {
        if (nl->nb == req->msg) {
            removed = peer_unsend(p, nl);
            break;
        }
    }
    if (!removed)
        peer_send(p, nb);
    if (p->nreqs_out == 0)
        peer_on_no_reqs(p);
}

void
peer_unchoke(struct peer *p)
{
    p->mp->flags &= ~PF_I_CHOKE;
    peer_send(p, nb_create_unchoke());
}

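/*
 * Choke the peer, withdrawing any piece messages (and their data
 * buffers) that are still waiting in the out queue.
 */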
void
peer_choke(struct peer *p)
{
    struct nb_link *nl = BTPDQ_FIRST(&p->outq);
    while (nl != NULL) {
        struct nb_link *next = BTPDQ_NEXT(nl, entry);
        if (nl->nb->type == NB_PIECE) {
            struct nb_link *data = next;
            next = BTPDQ_NEXT(next, entry);
            if (peer_unsend(p, nl))
                peer_unsend(p, data);
        }
        nl = next;
    }

    p->mp->flags |= PF_I_CHOKE;
    peer_send(p, nb_create_choke());
}

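/*
 * Register interest in one more of this peer's pieces. On the
 * transition from zero to one wanted piece we become interested:
 * either revoke a still unsent uninterest message or queue a new
 * interest message.
 */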
void
peer_want(struct peer *p, uint32_t index)
{
    if (!has_bit(p->piece_field, index) || peer_has_bad(p, index))
        return;
    assert(p->nwant < p->npieces);
    p->nwant++;
    if (p->nwant == 1) {
        p->mp->flags |= PF_I_WANT;
        if (p->mp->flags & PF_SUSPECT)
            return;
        if (p->nreqs_out == 0) {
            assert((p->mp->flags & PF_DO_UNWANT) == 0);
            int unsent = 0;
            struct nb_link *nl = BTPDQ_LAST(&p->outq, nb_tq);
            if (nl != NULL && nl->nb->type == NB_UNINTEREST)
                unsent = peer_unsend(p, nl);
            if (!unsent)
                peer_send(p, nb_create_interest());
        } else {
            assert((p->mp->flags & PF_DO_UNWANT) != 0);
            p->mp->flags &= ~PF_DO_UNWANT;
        }
    }
}

void
peer_unwant(struct peer *p, uint32_t index)
{
    if (!has_bit(p->piece_field, index) || peer_has_bad(p, index))
        return;
    assert(p->nwant > 0);
    p->nwant--;
    if (p->nwant == 0) {
        p->mp->flags &= ~PF_I_WANT;
        if (p->mp->flags & PF_SUSPECT)
            return;
        p->t_nointerest = btpd_seconds;
        if (p->nreqs_out == 0)
            peer_send(p, nb_create_uninterest());
        else
            p->mp->flags |= PF_DO_UNWANT;
    }
}

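/*
 * Setup shared by incoming and outgoing peers: both sides start out
 * choked, and input parsing starts by expecting the 28 byte
 * handshake prefix (protocol string plus reserved bytes).
 */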
static struct peer *
peer_create_common(int sd)
{
    struct peer *p = btpd_calloc(1, sizeof(*p));

    p->mp = mp_create();
    mp_hold(p->mp);
    p->mp->p = p;
    p->sd = sd;
    p->mp->flags = PF_I_CHOKE | PF_P_CHOKE;
    p->t_created = btpd_seconds;
    p->t_lastwrite = btpd_seconds;
    p->t_nointerest = btpd_seconds;
    BTPDQ_INIT(&p->my_reqs);
    BTPDQ_INIT(&p->outq);

    peer_set_in_state(p, SHAKE_PSTR, 28);

    btpd_ev_new(&p->ioev, p->sd, EV_READ, net_io_cb, p);

    BTPDQ_INSERT_TAIL(&net_unattached, p, p_entry);
    net_npeers++;
    return p;
}

void
peer_create_in(int sd)
{
    struct peer *p = peer_create_common(sd);
    p->mp->flags |= PF_INCOMING;
}

void
peer_create_out(struct net *n, const uint8_t *id,
    const char *ip, int port)
{
    int sd;
    struct peer *p;

    if (net_connect_name(ip, port, &sd) != 0)
        return;

    p = peer_create_common(sd);
    p->n = n;
    peer_send(p, nb_create_shake(n->tp));
}

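/*
 * Create an outgoing peer from a compact address: 4 bytes of IPv4
 * address or 16 bytes of IPv6 address, followed by a 2 byte port,
 * all in network byte order.
 */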
void
peer_create_out_compact(struct net *n, int family, const char *compact)
{
    int sd;
    struct peer *p;
    struct sockaddr_storage addr;
    struct sockaddr_in *a4;
    struct sockaddr_in6 *a6;
    socklen_t addrlen;

    switch (family) {
    case AF_INET:
        if (!net_ipv4)
            return;
        a4 = (struct sockaddr_in *)&addr;
        a4->sin_family = AF_INET;
        addrlen = sizeof(*a4);
        bcopy(compact, &a4->sin_addr.s_addr, 4);
        bcopy(compact + 4, &a4->sin_port, 2);
        break;
    case AF_INET6:
        if (!net_ipv6)
            return;
        a6 = (struct sockaddr_in6 *)&addr;
        a6->sin6_family = AF_INET6;
        addrlen = sizeof(*a6);
        bcopy(compact, &a6->sin6_addr, 16);
        bcopy(compact + 16, &a6->sin6_port, 2);
        break;
    default:
        abort();
    }

    if (net_connect_addr(family, (struct sockaddr *)&addr, addrlen, &sd) != 0)
        return;

    p = peer_create_common(sd);
    p->n = n;
    peer_send(p, nb_create_shake(n->tp));
}

void
peer_on_no_reqs(struct peer *p)
{
    if ((p->mp->flags & PF_DO_UNWANT) != 0) {
        assert(p->nwant == 0);
        p->mp->flags &= ~PF_DO_UNWANT;
        peer_send(p, nb_create_uninterest());
    }
}

void
peer_on_keepalive(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received keep alive from %p\n", p);
}

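/*
 * The handshake is complete: log the peer id, advertise our pieces
 * (a multihave when that is cheaper than a bitfield) and attach the
 * peer to its net.
 */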
void
peer_on_shake(struct peer *p)
{
    uint8_t printid[21];
    int i;
    for (i = 0; i < 20 && isprint(p->mp->id[i]); i++)
        printid[i] = p->mp->id[i];
    printid[i] = '\0';
    btpd_log(BTPD_L_MSG, "received shake(%s) from %p\n", printid, p);
    p->piece_field = btpd_calloc(1, (int)ceil(p->n->tp->npieces / 8.0));
    if (cm_pieces(p->n->tp) > 0) {
        if ((cm_pieces(p->n->tp) * 9 < 5 +
                ceil(p->n->tp->npieces / 8.0)))
            peer_send(p, nb_create_multihave(p->n->tp));
        else {
            peer_send(p, nb_create_bitfield(p->n->tp));
            peer_send(p, nb_create_bitdata(p->n->tp));
        }
    }

    mptbl_insert(p->n->mptbl, p->mp);
    BTPDQ_REMOVE(&net_unattached, p, p_entry);
    BTPDQ_INSERT_HEAD(&p->n->peers, p, p_entry);
    p->mp->flags |= PF_ATTACHED;
    p->n->npeers++;

    ul_on_new_peer(p);
    dl_on_new_peer(p);
}

void
peer_on_choke(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received choke from %p\n", p);
    if ((p->mp->flags & PF_P_CHOKE) != 0)
        return;
    else {
        p->mp->flags |= PF_P_CHOKE;
        dl_on_choke(p);
        struct nb_link *nl = BTPDQ_FIRST(&p->outq);
        while (nl != NULL) {
            struct nb_link *next = BTPDQ_NEXT(nl, entry);
            if (nl->nb->type == NB_REQUEST)
                peer_unsend(p, nl);
            nl = next;
        }
    }
}

void
peer_on_unchoke(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received unchoke from %p\n", p);
    if ((p->mp->flags & PF_P_CHOKE) == 0)
        return;
    else {
        p->mp->flags &= ~PF_P_CHOKE;
        dl_on_unchoke(p);
    }
}

void
peer_on_interest(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received interest from %p\n", p);
    if ((p->mp->flags & PF_P_WANT) != 0)
        return;
    else {
        p->mp->flags |= PF_P_WANT;
        ul_on_interest(p);
    }
}

void
peer_on_uninterest(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received uninterest from %p\n", p);
    if ((p->mp->flags & PF_P_WANT) == 0)
        return;
    else {
        p->mp->flags &= ~PF_P_WANT;
        p->t_nointerest = btpd_seconds;
        ul_on_uninterest(p);
    }
}

void
peer_on_have(struct peer *p, uint32_t index)
{
    btpd_log(BTPD_L_MSG, "received have(%u) from %p\n", index, p);
    if (!has_bit(p->piece_field, index)) {
        set_bit(p->piece_field, index);
        p->npieces++;
        dl_on_piece_ann(p, index);
    }
}

void
peer_on_bitfield(struct peer *p, const uint8_t *field)
{
    btpd_log(BTPD_L_MSG, "received bitfield from %p\n", p);
    assert(p->npieces == 0);
    bcopy(field, p->piece_field, (size_t)ceil(p->n->tp->npieces / 8.0));
    for (uint32_t i = 0; i < p->n->tp->npieces; i++) {
        if (has_bit(p->piece_field, i)) {
            p->npieces++;
            dl_on_piece_ann(p, i);
        }
    }
}

void
peer_on_piece(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length, const char *data)
{
    struct block_request *req;
    BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
        if ((nb_get_begin(req->msg) == begin &&
                nb_get_index(req->msg) == index &&
                nb_get_length(req->msg) == length))
            break;
    if (req != NULL) {
        btpd_log(BTPD_L_MSG, "received piece(%u,%u,%u) from %p\n",
            index, begin, length, p);
        assert(p->nreqs_out > 0);
        p->nreqs_out--;
        BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
        if (p->nreqs_out == 0)
            peer_on_no_reqs(p);
        dl_on_block(p, req, index, begin, length, data);
    } else
        btpd_log(BTPD_L_MSG, "discarded piece(%u,%u,%u) from %p\n",
            index, begin, length, p);
}

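/*
 * Answer a block request unless requests are disabled. Once too many
 * piece messages are pending, a choke/unchoke pair is queued and
 * further requests are ignored until the unchoke has actually been
 * sent (peer_sent clears PF_NO_REQUESTS).
 */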
void
peer_on_request(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length)
{
    btpd_log(BTPD_L_MSG, "received request(%u,%u,%u) from %p\n",
        index, begin, length, p);
    if ((p->mp->flags & PF_NO_REQUESTS) == 0) {
        peer_send(p, nb_create_piece(index, begin, length));
        peer_send(p, nb_create_torrentdata());
        p->npiece_msgs++;
        if (p->npiece_msgs >= MAXPIECEMSGS) {
            peer_send(p, nb_create_choke());
            peer_send(p, nb_create_unchoke());
            p->mp->flags |= PF_NO_REQUESTS;
        }
    }
}

void
peer_on_cancel(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length)
{
    btpd_log(BTPD_L_MSG, "received cancel(%u,%u,%u) from %p\n",
        index, begin, length, p);
    struct nb_link *nl;
    BTPDQ_FOREACH(nl, &p->outq, entry)
        if (nl->nb->type == NB_PIECE
            && nb_get_begin(nl->nb) == begin
            && nb_get_index(nl->nb) == index
            && nb_get_length(nl->nb) == length) {
            struct nb_link *data = BTPDQ_NEXT(nl, entry);
            if (peer_unsend(p, nl))
                peer_unsend(p, data);
            break;
        }
}

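/*
 * Per-second maintenance: keep idle connections alive and kill peers
 * that are banned, stall on writes, show no mutual interest on a
 * complete torrent, or fail to complete the handshake in time.
 */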
void
peer_on_tick(struct peer *p)
{
    if (p->mp->flags & PF_BANNED)
        goto kill;
    if (p->mp->flags & PF_ATTACHED) {
        if (BTPDQ_EMPTY(&p->outq)) {
            if (btpd_seconds - p->t_lastwrite >= 120)
                peer_keepalive(p);
        } else if (btpd_seconds - p->t_wantwrite >= 60) {
            btpd_log(BTPD_L_CONN, "write attempt timed out.\n");
            goto kill;
        }
        if ((cm_full(p->n->tp) && !(p->mp->flags & PF_P_WANT) &&
                btpd_seconds - p->t_nointerest >= 600)) {
            btpd_log(BTPD_L_CONN, "no interest for 10 minutes.\n");
            goto kill;
        }
    } else if (btpd_seconds - p->t_created >= 60) {
        btpd_log(BTPD_L_CONN, "hand shake timed out.\n");
        goto kill;
    }
    return;
kill:
    peer_kill(p);
}

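/*
 * Data from this peer was part of a piece that failed its hash test.
 * Bad pieces are tracked per peer; after three strikes the peer is
 * marked suspect and we declare uninterest.
 */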
void
peer_bad_piece(struct peer *p, uint32_t index)
{
    if (p->npcs_bad == 0) {
        assert(p->bad_field == NULL);
        p->bad_field = btpd_calloc(ceil(p->n->tp->npieces / 8.0), 1);
    }
    assert(!has_bit(p->bad_field, index));
    set_bit(p->bad_field, index);
    p->npcs_bad++;
    p->suspicion++;
    if (p->suspicion == 3) {
        btpd_log(BTPD_L_BAD, "suspect peer %p.\n", p);
        p->mp->flags |= PF_SUSPECT;
        if (p->nwant > 0) {
            p->mp->flags &= ~PF_DO_UNWANT;
            peer_send(p, nb_create_uninterest());
        }
    }
}

void
peer_good_piece(struct peer *p, uint32_t index)
{
    if (peer_has_bad(p, index)) {
        assert(p->npcs_bad > 0);
        p->npcs_bad--;
        if (p->npcs_bad == 0) {
            free(p->bad_field);
            p->bad_field = NULL;
        } else
            clear_bit(p->bad_field, index);
    }
    p->suspicion = 0;
    if (p->mp->flags & PF_SUSPECT) {
        btpd_log(BTPD_L_BAD, "unsuspect peer %p.\n", p);
        p->mp->flags &= ~PF_SUSPECT;
        if (p->nwant > 0) {
            assert(p->mp->flags & PF_I_WANT);
            peer_send(p, nb_create_interest());
        }
        if (peer_leech_ok(p))
            dl_on_download(p);
    }
}

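/* Small status predicates used by the upload/download logic. */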
int
peer_chokes(struct peer *p)
{
    return p->mp->flags & PF_P_CHOKE;
}

int
peer_has(struct peer *p, uint32_t index)
{
    return has_bit(p->piece_field, index);
}

int
peer_has_bad(struct peer *p, uint32_t index)
{
    return p->bad_field != NULL && has_bit(p->bad_field, index);
}

int
peer_laden(struct peer *p)
{
    return p->nreqs_out >= MAXPIPEDREQUESTS;
}

int
peer_wanted(struct peer *p)
{
    return (p->mp->flags & PF_I_WANT) == PF_I_WANT;
}

int
peer_leech_ok(struct peer *p)
{
    return (p->mp->flags & (PF_BANNED|PF_SUSPECT|PF_I_WANT|PF_P_CHOKE))
        == PF_I_WANT && !peer_laden(p);
}

int
peer_active_down(struct peer *p)
{
    return peer_leech_ok(p) || p->nreqs_out > 0;
}

int
peer_active_up(struct peer *p)
{
    return (p->mp->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT
        || p->npiece_msgs > 0;
}

int
peer_full(struct peer *p)
{
    return p->npieces == p->n->tp->npieces;
}

int
peer_requestable(struct peer *p, uint32_t index)
{
    return peer_has(p, index) && !peer_has_bad(p, index);
}