A clone of btpd with my configuration changes.

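/*
 * Peer network I/O for btpd: wire-protocol parsing (handshake and
 * messages) plus bandwidth-limited, non-blocking reads and writes.
 * (Descriptive header added for orientation; the file's purpose is
 * inferred from the net_* functions below.)
 */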
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <sys/mman.h>
#include <sys/wait.h>

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "btpd.h"

#define min(x, y) ((x) <= (y) ? (x) : (y))
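
/* The BitTorrent wire protocol carries integers as big-endian 32-bit words. */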
void
net_write32(void *buf, uint32_t num)
{
    *(uint32_t *)buf = htonl(num);
}

uint32_t
net_read32(const void *buf)
{
    return ntohl(*(const uint32_t *)buf);
}
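
/*
 * Write as much of the peer's output queue as possible to its socket.
 * Up to IOV_MAX queued buffers are gathered into a single writev();
 * a nonzero wmax caps the number of bytes sent, implementing the
 * upload bandwidth limit. Fully sent buffers are removed from the
 * queue; a partially sent head buffer is remembered via outq_off.
 * Returns the number of bytes written, or 0 on EAGAIN or after the
 * peer has been killed.
 */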
static unsigned long
net_write(struct peer *p, unsigned long wmax)
{
    struct nb_link *nl;
    struct iovec iov[IOV_MAX];
    int niov;
    int limited;
    ssize_t nwritten;
    unsigned long bcount;

    limited = wmax > 0;

    niov = 0;
    assert((nl = BTPDQ_FIRST(&p->outq)) != NULL);
    while (niov < IOV_MAX && nl != NULL && (!limited || wmax > 0)) {
        if (niov > 0) {
            iov[niov].iov_base = nl->nb->buf;
            iov[niov].iov_len = nl->nb->len;
        } else {
            iov[niov].iov_base = nl->nb->buf + p->outq_off;
            iov[niov].iov_len = nl->nb->len - p->outq_off;
        }
        if (limited) {
            if (iov[niov].iov_len > wmax)
                iov[niov].iov_len = wmax;
            wmax -= iov[niov].iov_len;
        }
        niov++;
        nl = BTPDQ_NEXT(nl, entry);
    }

    nwritten = writev(p->sd, iov, niov);
    if (nwritten < 0) {
        if (errno == EAGAIN) {
            event_add(&p->out_ev, WRITE_TIMEOUT);
            return 0;
        } else {
            btpd_log(BTPD_L_CONN, "write error: %s\n", strerror(errno));
            peer_kill(p);
            return 0;
        }
    } else if (nwritten == 0) {
        btpd_log(BTPD_L_CONN, "connection closed by peer.\n");
        peer_kill(p);
        return 0;
    }

    bcount = nwritten;
    nl = BTPDQ_FIRST(&p->outq);
    while (bcount > 0) {
        unsigned long bufdelta = nl->nb->len - p->outq_off;
        if (bcount >= bufdelta) {
            peer_sent(p, nl->nb);
            if (nl->nb->type == NB_TORRENTDATA) {
                p->tp->uploaded += bufdelta;
                p->rate_from_me[btpd.seconds % RATEHISTORY] += bufdelta;
            }
            bcount -= bufdelta;
            BTPDQ_REMOVE(&p->outq, nl, entry);
            nb_drop(nl->nb);
            free(nl);
            p->outq_off = 0;
            nl = BTPDQ_FIRST(&p->outq);
        } else {
            if (nl->nb->type == NB_TORRENTDATA) {
                p->tp->uploaded += bcount;
                p->rate_from_me[btpd.seconds % RATEHISTORY] += bcount;
            }
            p->outq_off += bcount;
            bcount = 0;
        }
    }
    if (!BTPDQ_EMPTY(&p->outq))
        event_add(&p->out_ev, WRITE_TIMEOUT);
    return nwritten;
}
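
/* Tell the input parser what to expect next: size bytes in the given state. */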
void
net_set_state(struct peer *p, enum net_state state, size_t size)
{
    p->net.state = state;
    p->net.st_bytes = size;
}
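
/*
 * Act on a complete message whose length and id have already been
 * validated by net_mh_ok(). Returns nonzero if the message is invalid
 * in the current context, in which case the caller drops the peer.
 */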
static int
net_dispatch_msg(struct peer *p, const char *buf)
{
    uint32_t index, begin, length;
    int res = 0;

    switch (p->net.msg_num) {
    case MSG_CHOKE:
        peer_on_choke(p);
        break;
    case MSG_UNCHOKE:
        peer_on_unchoke(p);
        break;
    case MSG_INTEREST:
        peer_on_interest(p);
        break;
    case MSG_UNINTEREST:
        peer_on_uninterest(p);
        break;
    case MSG_HAVE:
        peer_on_have(p, net_read32(buf));
        break;
    case MSG_BITFIELD:
        if (p->npieces == 0)
            peer_on_bitfield(p, buf);
        else
            res = 1;
        break;
    case MSG_REQUEST:
        if ((p->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT) {
            index = net_read32(buf);
            begin = net_read32(buf + 4);
            length = net_read32(buf + 8);
            if (length > PIECE_BLOCKLEN
                || index >= p->tp->meta.npieces
                || !has_bit(p->tp->piece_field, index)
                || begin + length > torrent_piece_size(p->tp, index)) {
                btpd_log(BTPD_L_MSG, "bad request: (%u, %u, %u) from %p\n",
                    index, begin, length, p);
                res = 1;
                break;
            }
            peer_on_request(p, index, begin, length);
        }
        break;
    case MSG_CANCEL:
        index = net_read32(buf);
        begin = net_read32(buf + 4);
        length = net_read32(buf + 8);
        peer_on_cancel(p, index, begin, length);
        break;
    case MSG_PIECE:
        index = net_read32(buf);
        begin = net_read32(buf + 4);
        length = p->net.msg_len - 9;
        peer_on_piece(p, index, begin, length, buf + 8);
        break;
    default:
        abort();
    }
    return res;
}
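
/* Check that the announced message length is sane for the message type. */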
static int
net_mh_ok(struct peer *p)
{
    uint32_t mlen = p->net.msg_len;

    switch (p->net.msg_num) {
    case MSG_CHOKE:
    case MSG_UNCHOKE:
    case MSG_INTEREST:
    case MSG_UNINTEREST:
        return mlen == 1;
    case MSG_HAVE:
        return mlen == 5;
    case MSG_BITFIELD:
        return mlen == (uint32_t)ceil(p->tp->meta.npieces / 8.0) + 1;
    case MSG_REQUEST:
    case MSG_CANCEL:
        return mlen == 13;
    case MSG_PIECE:
        return mlen <= PIECE_BLOCKLEN + 9;
    default:
        return 0;
    }
}
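
/* Count piece payload bytes towards the download total and rate history. */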
static void
net_progress(struct peer *p, size_t length)
{
    if (p->net.state == BTP_MSGBODY && p->net.msg_num == MSG_PIECE) {
        p->tp->downloaded += length;
        p->rate_to_me[btpd.seconds % RATEHISTORY] += length;
    }
}
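
/*
 * Advance the per-peer parser one step. Each state consumes a fixed
 * number of bytes (set via net_set_state): the handshake string, info
 * hash and peer id, then alternating message-size, message-head and
 * message-body units. Returns -1 and kills the peer on bad data.
 */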
static int
net_state(struct peer *p, const char *buf)
{
    switch (p->net.state) {
    case SHAKE_PSTR:
        if (bcmp(buf, "\x13""BitTorrent protocol", 20) != 0)
            goto bad;
        net_set_state(p, SHAKE_INFO, 20);
        break;
    case SHAKE_INFO:
        if (p->flags & PF_INCOMING) {
            struct torrent *tp = torrent_get_by_hash(buf);
            if (tp == NULL)
                goto bad;
            p->tp = tp;
            peer_send(p, nb_create_shake(p->tp));
        } else if (bcmp(buf, p->tp->meta.info_hash, 20) != 0)
            goto bad;
        net_set_state(p, SHAKE_ID, 20);
        break;
    case SHAKE_ID:
        if (torrent_has_peer(p->tp, buf)
            || bcmp(buf, btpd.peer_id, 20) == 0)
            goto bad;
        bcopy(buf, p->id, 20);
        peer_on_shake(p);
        net_set_state(p, BTP_MSGSIZE, 4);
        break;
    case BTP_MSGSIZE:
        p->net.msg_len = net_read32(buf);
        if (p->net.msg_len != 0)
            net_set_state(p, BTP_MSGHEAD, 1);
        break;
    case BTP_MSGHEAD:
        p->net.msg_num = buf[0];
        if (!net_mh_ok(p))
            goto bad;
        else if (p->net.msg_len == 1) {
            if (net_dispatch_msg(p, buf) != 0)
                goto bad;
            net_set_state(p, BTP_MSGSIZE, 4);
        } else
            net_set_state(p, BTP_MSGBODY, p->net.msg_len - 1);
        break;
    case BTP_MSGBODY:
        if (net_dispatch_msg(p, buf) != 0)
            goto bad;
        net_set_state(p, BTP_MSGSIZE, 4);
        break;
    default:
        abort();
    }
    return 0;

bad:
    btpd_log(BTPD_L_CONN, "bad data from %p (%u, %u, %u).\n",
        p, p->net.state, p->net.msg_len, p->net.msg_num);
    peer_kill(p);
    return -1;
}
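
/*
 * Read from the peer's socket, limited to rmax bytes when nonzero.
 * readv() fills the partially completed state buffer (if any) and
 * then a GRBUFLEN stack buffer; complete state units are fed to
 * net_state(), and any trailing fragment is copied into a freshly
 * allocated buffer to await the next read.
 */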
#define GRBUFLEN (1 << 15)

static unsigned long
net_read(struct peer *p, unsigned long rmax)
{
    size_t rest = p->net.buf != NULL ? p->net.st_bytes - p->net.off : 0;
    char buf[GRBUFLEN];
    struct iovec iov[2] = {
        {
            p->net.buf + p->net.off,
            rest
        }, {
            buf,
            sizeof(buf)
        }
    };

    if (rmax > 0) {
        if (iov[0].iov_len > rmax)
            iov[0].iov_len = rmax;
        iov[1].iov_len = min(rmax - iov[0].iov_len, iov[1].iov_len);
    }

    ssize_t nread = readv(p->sd, iov, 2);
    if (nread < 0 && errno == EAGAIN)
        goto out;
    else if (nread < 0) {
        btpd_log(BTPD_L_CONN, "Read error (%s) on %p.\n", strerror(errno), p);
        peer_kill(p);
        return 0;
    } else if (nread == 0) {
        btpd_log(BTPD_L_CONN, "Connection closed by %p.\n", p);
        peer_kill(p);
        return 0;
    }

    if (rest > 0) {
        if ((size_t)nread < rest) {
            p->net.off += nread;
            net_progress(p, nread);
            goto out;
        }
        net_progress(p, rest);
        if (net_state(p, p->net.buf) != 0)
            return nread;
        free(p->net.buf);
        p->net.buf = NULL;
        p->net.off = 0;
    }

    iov[1].iov_len = nread - rest;
    while (p->net.st_bytes <= iov[1].iov_len) {
        size_t consumed = p->net.st_bytes;
        net_progress(p, consumed);
        if (net_state(p, iov[1].iov_base) != 0)
            return nread;
        iov[1].iov_base = (char *)iov[1].iov_base + consumed;
        iov[1].iov_len -= consumed;
    }
    if (iov[1].iov_len > 0) {
        net_progress(p, iov[1].iov_len);
        p->net.off = iov[1].iov_len;
        p->net.buf = btpd_malloc(p->net.st_bytes);
        bcopy(iov[1].iov_base, p->net.buf, iov[1].iov_len);
    }

out:
    event_add(&p->in_ev, NULL);
    return nread > 0 ? nread : 0;
}
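
/* Start a non-blocking connect to the given address. Returns 0 or an errno. */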
int
net_connect2(struct sockaddr *sa, socklen_t salen, int *sd)
{
    if ((*sd = socket(PF_INET, SOCK_STREAM, 0)) == -1)
        return errno;

    set_nonblocking(*sd);

    if (connect(*sd, sa, salen) == -1 && errno != EINPROGRESS) {
        btpd_log(BTPD_L_CONN, "Botched connection %s.\n", strerror(errno));
        close(*sd);
        return errno;
    }
    return 0;
}
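
/*
 * Resolve a numeric host address and port, then initiate a non-blocking
 * connect. Note that getaddrinfo() reports failures through its return
 * value rather than errno, so the errno returned on lookup failure here
 * may be stale.
 */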
int
net_connect(const char *ip, int port, int *sd)
{
    struct addrinfo hints, *res;
    char portstr[6];

    assert(btpd.npeers < btpd.maxpeers);

    if (snprintf(portstr, sizeof(portstr), "%d", port) >= (int)sizeof(portstr))
        return EINVAL;

    bzero(&hints, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_flags = AI_NUMERICHOST;
    hints.ai_socktype = SOCK_STREAM;
    if (getaddrinfo(ip, portstr, &hints, &res) != 0)
        return errno;

    int error = net_connect2(res->ai_addr, res->ai_addrlen, sd);

    freeaddrinfo(res);

    return error;
}
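
/* Accept an incoming connection, unless the peer limit has been reached. */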
void
net_connection_cb(int sd, short type, void *arg)
{
    int nsd;

    nsd = accept(sd, NULL, NULL);
    if (nsd < 0) {
        if (errno == EWOULDBLOCK || errno == ECONNABORTED)
            return;
        else
            btpd_err("accept: %s\n", strerror(errno));
    }

    if (set_nonblocking(nsd) != 0) {
        close(nsd);
        return;
    }

    assert(btpd.npeers <= btpd.maxpeers);
    if (btpd.npeers == btpd.maxpeers) {
        close(nsd);
        return;
    }

    peer_create_in(nsd);

    btpd_log(BTPD_L_CONN, "got connection.\n");
}
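
/*
 * Maintain a sliding window of BWCALLHISTORY samples counting how often
 * the bandwidth callback ran per sample interval (presumably one second),
 * and update the average frequency used to size each tick's byte quota.
 */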
void
net_bw_rate(void)
{
    unsigned sum = 0;

    for (int i = 0; i < BWCALLHISTORY - 1; i++) {
        btpd.bwrate[i] = btpd.bwrate[i + 1];
        sum += btpd.bwrate[i];
    }
    btpd.bwrate[BWCALLHISTORY - 1] = btpd.bwcalls;
    sum += btpd.bwrate[BWCALLHISTORY - 1];
    btpd.bwcalls = 0;
    btpd.bw_hz_avg = sum / 5.0;
}
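
/*
 * Periodic bandwidth tick: divide the configured in/out limits by the
 * measured callback frequency to get this tick's byte quota, then
 * service queued readers and writers until the quota runs out. With no
 * limit configured, every queued peer is serviced unconditionally.
 */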
void
net_bw_cb(int sd, short type, void *arg)
{
    struct peer *p;

    btpd.bwcalls++;

    double avg_hz;
    if (btpd.seconds < BWCALLHISTORY)
        avg_hz = btpd.bw_hz;
    else
        avg_hz = btpd.bw_hz_avg;

    btpd.obw_left = btpd.obwlim / avg_hz;
    btpd.ibw_left = btpd.ibwlim / avg_hz;

    if (btpd.ibwlim > 0) {
        while ((p = BTPDQ_FIRST(&btpd.readq)) != NULL && btpd.ibw_left > 0) {
            BTPDQ_REMOVE(&btpd.readq, p, rq_entry);
            p->flags &= ~PF_ON_READQ;
            btpd.ibw_left -= net_read(p, btpd.ibw_left);
        }
    } else {
        while ((p = BTPDQ_FIRST(&btpd.readq)) != NULL) {
            BTPDQ_REMOVE(&btpd.readq, p, rq_entry);
            p->flags &= ~PF_ON_READQ;
            net_read(p, 0);
        }
    }

    if (btpd.obwlim > 0) {
        while ((p = BTPDQ_FIRST(&btpd.writeq)) != NULL && btpd.obw_left > 0) {
            BTPDQ_REMOVE(&btpd.writeq, p, wq_entry);
            p->flags &= ~PF_ON_WRITEQ;
            btpd.obw_left -= net_write(p, btpd.obw_left);
        }
    } else {
        while ((p = BTPDQ_FIRST(&btpd.writeq)) != NULL) {
            BTPDQ_REMOVE(&btpd.writeq, p, wq_entry);
            p->flags &= ~PF_ON_WRITEQ;
            net_write(p, 0);
        }
    }

    event_add(&btpd.bwlim, (& (struct timeval) { 0, 1000000 / btpd.bw_hz }));
}
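
/* Socket became readable: read now if bandwidth allows, else queue the peer. */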
void
net_read_cb(int sd, short type, void *arg)
{
    struct peer *p = (struct peer *)arg;

    if (btpd.ibwlim == 0)
        net_read(p, 0);
    else if (btpd.ibw_left > 0)
        btpd.ibw_left -= net_read(p, btpd.ibw_left);
    else {
        p->flags |= PF_ON_READQ;
        BTPDQ_INSERT_TAIL(&btpd.readq, p, rq_entry);
    }
}
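
/* Socket became writable, or the write timed out: flush the output queue
 * if bandwidth allows, else queue the peer for the next bandwidth tick. */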
void
net_write_cb(int sd, short type, void *arg)
{
    struct peer *p = (struct peer *)arg;

    if (type == EV_TIMEOUT) {
        btpd_log(BTPD_L_CONN, "Write attempt timed out.\n");
        peer_kill(p);
        return;
    }

    if (btpd.obwlim == 0) {
        net_write(p, 0);
    } else if (btpd.obw_left > 0) {
        btpd.obw_left -= net_write(p, btpd.obw_left);
    } else {
        p->flags |= PF_ON_WRITEQ;
        BTPDQ_INSERT_TAIL(&btpd.writeq, p, wq_entry);
    }
}