From 9f4070eeb80442607c60d50f5f32d31cdb8e17d3 Mon Sep 17 00:00:00 2001
From: Richard Nyberg
Date: Tue, 21 Feb 2006 21:59:49 +0000
Subject: [PATCH] There's now one list with requests per piece instead of one
 per block. The network buffers for requests are now allocated as they're
 needed. Before they were allocated at the same time as their corresponding
 piece. This lowers btpd's memory consumption.

---
 btpd/download.c      |  28 ++++++-----
 btpd/download_subr.c | 113 ++++++++++++++++++++++++-------------------
 btpd/net_types.h     |  11 ++---
 btpd/peer.c          |  15 +++---
 btpd/peer.h          |   2 +-
 5 files changed, 90 insertions(+), 79 deletions(-)

diff --git a/btpd/download.c b/btpd/download.c
index e2b3888..2bb62fc 100644
--- a/btpd/download.c
+++ b/btpd/download.c
@@ -155,36 +155,38 @@ dl_on_block(struct peer *p, struct block_request *req,
     uint32_t index, uint32_t begin, uint32_t length, const uint8_t *data)
 {
     struct net *n = p->n;
-    struct block *blk = req->blk;
-    struct piece *pc = blk->pc;
+    struct piece *pc = dl_find_piece(n, index);
 
     cm_put_bytes(p->n->tp, index, begin, data, length);
     pc->ngot++;
 
     if (n->endgame) {
-        struct block_request *req;
+        struct block_request *req, *next;
         struct net_buf *cancel = nb_create_cancel(index, begin, length);
         nb_hold(cancel);
-        BTPDQ_FOREACH(req, &blk->reqs, blk_entry) {
-            if (req->p != p)
-                peer_cancel(req->p, req, cancel);
-            pc->nreqs--;
+        BTPDQ_FOREACH(req, &pc->reqs, blk_entry) {
+            if (nb_get_begin(req->msg) == begin) {
+                if (req->p != p)
+                    peer_cancel(req->p, req, cancel);
+                pc->nreqs--;
+            }
         }
         nb_drop(cancel);
         dl_piece_reorder_eg(pc);
-        req = BTPDQ_FIRST(&blk->reqs);
-        while (req != NULL) {
-            struct block_request *next = BTPDQ_NEXT(req, blk_entry);
+        BTPDQ_FOREACH_MUTABLE(req, &pc->reqs, blk_entry, next) {
+            if (nb_get_begin(req->msg) != begin)
+                continue;
+            BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
+            nb_drop(req->msg);
             if (peer_leech_ok(req->p) && !peer_laden(req->p))
                 dl_assign_requests_eg(req->p);
             free(req);
-            req = next;
         }
-        BTPDQ_INIT(&blk->reqs);
         if (pc->ngot == pc->nblocks)
             cm_test_piece(pc->n->tp, pc->index);
     } else {
-        BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
+        BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
+        nb_drop(req->msg);
         free(req);
         pc->nreqs--;
         // XXX: Needs to be looked at if we introduce snubbing.
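
The endgame branch above now recovers a finished block's outstanding
requests by filtering the piece-wide list on the block's byte offset,
where the old code walked a dedicated per-block list. A minimal,
self-contained sketch of that pattern, with the standard <sys/queue.h>
TAILQ macros standing in for btpd's BTPDQ clone; the types and names
below are simplified stand-ins, not btpd's own:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct request {
    int peer_id;                /* stand-in for struct peer *p */
    uint32_t begin;             /* byte offset of the block in its piece */
    TAILQ_ENTRY(request) entry; /* plays the role of blk_entry */
};

TAILQ_HEAD(request_list, request);

/* Drop every request for the block at `begin`, the way dl_on_block now
 * scans the whole per-piece list instead of one list per block. */
static void
finish_block(struct request_list *reqs, uint32_t begin)
{
    struct request *req = TAILQ_FIRST(reqs);
    while (req != NULL) {
        struct request *next = TAILQ_NEXT(req, entry);
        if (req->begin == begin) {
            TAILQ_REMOVE(reqs, req, entry);
            printf("dropped request (peer %d, begin %u)\n",
                req->peer_id, req->begin);
            free(req);
        }
        req = next;
    }
}

int
main(void)
{
    struct request_list reqs = TAILQ_HEAD_INITIALIZER(reqs);
    uint32_t begins[] = { 0, 0, 16384 }; /* two peers on block 0, one on 1 */
    for (int i = 0; i < 3; i++) {
        struct request *req = malloc(sizeof(*req));
        req->peer_id = i;
        req->begin = begins[i];
        TAILQ_INSERT_TAIL(&reqs, req, entry);
    }
    finish_block(&reqs, 0); /* removes the two block-0 requests only */
    return 0;
}

Matching by offset is what lets the patch delete struct block entirely:
the request message already carries index, begin and length, so the list
entry needs no per-block list head or back-pointer.
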
diff --git a/btpd/download_subr.c b/btpd/download_subr.c
index 67d9e85..c78f5b1 100644
--- a/btpd/download_subr.c
+++ b/btpd/download_subr.c
@@ -35,13 +35,12 @@ piece_alloc(struct net *n, uint32_t index)
     assert(!has_bit(n->busy_field, index)
         && n->npcs_busy < n->tp->meta.npieces);
     struct piece *pc;
-    size_t mem, field, blocks;
+    size_t mem, field;
     unsigned nblocks;
 
     nblocks = torrent_piece_blocks(n->tp, index);
-    blocks = sizeof(pc->blocks[0]) * nblocks;
     field = (size_t)ceil(nblocks / 8.0);
-    mem = sizeof(*pc) + field + blocks;
+    mem = sizeof(*pc) + field;
 
     pc = btpd_calloc(1, mem);
     pc->n = n;
@@ -59,16 +58,7 @@ piece_alloc(struct net *n, uint32_t index)
             pc->ngot++;
     assert(pc->ngot < pc->nblocks);
 
-    pc->blocks = (struct block *)(pc->down_field + field);
-    for (unsigned i = 0; i < nblocks; i++) {
-        uint32_t start = i * PIECE_BLOCKLEN;
-        uint32_t len = torrent_block_size(n->tp, index, nblocks, i);
-        struct block *blk = &pc->blocks[i];
-        blk->pc = pc;
-        BTPDQ_INIT(&blk->reqs);
-        blk->msg = nb_create_request(index, start, len);
-        nb_hold(blk->msg);
-    }
+    BTPDQ_INIT(&pc->reqs);
 
     n->npcs_busy++;
     set_bit(n->busy_field, index);
@@ -80,18 +70,20 @@ void
 piece_free(struct piece *pc)
 {
     struct net *n = pc->n;
+    struct block_request *req, *next;
     assert(n->npcs_busy > 0);
     n->npcs_busy--;
     clear_bit(n->busy_field, pc->index);
     BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
-    for (unsigned i = 0; i < pc->nblocks; i++) {
-        struct block_request *req = BTPDQ_FIRST(&pc->blocks[i].reqs);
-        while (req != NULL) {
-            struct block_request *next = BTPDQ_NEXT(req, blk_entry);
-            free(req);
-            req = next;
-        }
-        nb_drop(pc->blocks[i].msg);
+    BTPDQ_FOREACH_MUTABLE(req, &pc->reqs, blk_entry, next) {
+        nb_drop(req->msg);
+        free(req);
+    }
+    if (pc->eg_reqs != NULL) {
+        for (uint32_t i = 0; i < pc->nblocks; i++)
+            if (pc->eg_reqs[i] != NULL)
+                nb_drop(pc->eg_reqs[i]);
+        free(pc->eg_reqs);
     }
     free(pc);
 }
@@ -161,9 +153,18 @@ dl_enter_endgame(struct net *n)
     pi = 0;
     BTPDQ_FOREACH(pc, &n->getlst, entry) {
+        struct block_request *req;
         for (unsigned i = 0; i < pc->nblocks; i++)
             clear_bit(pc->down_field, i);
         pc->nbusy = 0;
+        pc->eg_reqs = btpd_calloc(pc->nblocks, sizeof(struct net_buf *));
+        BTPDQ_FOREACH(req, &pc->reqs, blk_entry) {
+            uint32_t blki = nb_get_begin(req->msg) / PIECE_BLOCKLEN;
+            if (pc->eg_reqs[blki] == NULL) {
+                pc->eg_reqs[blki] = req->msg;
+                nb_hold(req->msg);
+            }
+        }
         pcs[pi] = pc;
         pi++;
     }
@@ -308,6 +309,30 @@ dl_on_piece_unfull(struct piece *pc)
 #define INCNEXTBLOCK(pc) \
     (pc)->next_block = ((pc)->next_block + 1) % (pc)->nblocks
 
+static struct block_request *
+dl_new_request(struct peer *p, struct piece *pc, struct net_buf *msg)
+{
+    if (msg == NULL) {
+        uint32_t block = pc->next_block;
+        uint32_t start = block * PIECE_BLOCKLEN;
+        uint32_t length =
+            torrent_block_size(pc->n->tp, pc->index, pc->nblocks, block);
+        msg = nb_create_request(pc->index, start, length);
+    }
+    struct block_request *req = btpd_malloc(sizeof(*req));
+    req->p = p;
+    req->msg = msg;
+    nb_hold(req->msg);
+    BTPDQ_INSERT_TAIL(&pc->reqs, req, blk_entry);
+    pc->nreqs++;
+    if (!pc->n->endgame) {
+        set_bit(pc->down_field, pc->next_block);
+        pc->nbusy++;
+    }
+    INCNEXTBLOCK(pc);
+    peer_request(p, req);
+    return req;
+}
 
 /*
  * Request as many blocks as possible on this piece from
@@ -324,20 +349,8 @@ dl_piece_assign_requests(struct piece *pc, struct peer *p)
         while ((has_bit(pc->have_field, pc->next_block)
                    || has_bit(pc->down_field, pc->next_block)))
             INCNEXTBLOCK(pc);
-
-        struct block *blk = &pc->blocks[pc->next_block];
-        struct block_request *req = btpd_malloc(sizeof(*req));
-        req->p = p;
-        req->blk = blk;
-        BTPDQ_INSERT_TAIL(&blk->reqs, req, blk_entry);
-
-        peer_request(p, req);
-
-        set_bit(pc->down_field, pc->next_block);
-        pc->nbusy++;
-        pc->nreqs++;
+        dl_new_request(p, pc, NULL);
         count++;
-        INCNEXTBLOCK(pc);
     } while (!piece_full(pc) && !peer_laden(p));
 
     if (piece_full(pc))
@@ -392,25 +405,25 @@ dl_unassign_requests(struct peer *p)
 {
     while (p->nreqs_out > 0) {
         struct block_request *req = BTPDQ_FIRST(&p->my_reqs);
-        struct piece *pc = req->blk->pc;
+        struct piece *pc = dl_find_piece(p->n, nb_get_index(req->msg));
         int was_full = piece_full(pc);
 
         while (req != NULL) {
             struct block_request *next = BTPDQ_NEXT(req, p_entry);
-            uint32_t blki = nb_get_begin(req->blk->msg) / PIECE_BLOCKLEN;
-            struct block *blk = req->blk;
+            uint32_t blki = nb_get_begin(req->msg) / PIECE_BLOCKLEN;
             // XXX: Needs to be looked at if we introduce snubbing.
             assert(has_bit(pc->down_field, blki));
             clear_bit(pc->down_field, blki);
             pc->nbusy--;
             BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
             p->nreqs_out--;
-            BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
+            BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
+            nb_drop(req->msg);
             free(req);
             pc->nreqs--;
 
-            while (next != NULL && next->blk->pc != pc)
+            while (next != NULL && nb_get_index(next->msg) != pc->index)
                 next = BTPDQ_NEXT(next, p_entry);
             req = next;
         }
 
@@ -430,17 +443,16 @@ dl_piece_assign_requests_eg(struct piece *pc, struct peer *p)
     unsigned first_block = pc->next_block;
     do {
         if ((has_bit(pc->have_field, pc->next_block)
-                || peer_requested(p, &pc->blocks[pc->next_block]))) {
+                || peer_requested(p, pc->index, pc->next_block))) {
             INCNEXTBLOCK(pc);
             continue;
         }
 
-        struct block_request *req = btpd_calloc(1, sizeof(*req));
-        req->blk = &pc->blocks[pc->next_block];
-        req->p = p;
-        BTPDQ_INSERT_TAIL(&pc->blocks[pc->next_block].reqs, req, blk_entry);
-        pc->nreqs++;
-        INCNEXTBLOCK(pc);
-        peer_request(p, req);
+        struct block_request *req =
+            dl_new_request(p, pc, pc->eg_reqs[pc->next_block]);
+        if (pc->eg_reqs[pc->next_block] == NULL) {
+            pc->eg_reqs[pc->next_block] = req->msg;
+            nb_hold(req->msg);
+        }
     } while (!peer_laden(p) && pc->next_block != first_block);
 }
@@ -482,7 +494,7 @@ dl_unassign_requests_eg(struct peer *p)
     while (p->nreqs_out > 0) {
         req = BTPDQ_FIRST(&p->my_reqs);
-        pc = req->blk->pc;
+        pc = dl_find_piece(p->n, nb_get_index(req->msg));
 
         BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
         BTPDQ_INSERT_HEAD(&tmp, pc, entry);
@@ -490,11 +502,12 @@
             struct block_request *next = BTPDQ_NEXT(req, p_entry);
             BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
             p->nreqs_out--;
-            BTPDQ_REMOVE(&req->blk->reqs, req, blk_entry);
+            BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
+            nb_drop(req->msg);
             free(req);
             pc->nreqs--;
 
-            while (next != NULL && next->blk->pc != pc)
+            while (next != NULL && nb_get_index(next->msg) != pc->index)
                 next = BTPDQ_NEXT(next, p_entry);
             req = next;
         }
diff --git a/btpd/net_types.h b/btpd/net_types.h
index 58b1e8e..5359867 100644
--- a/btpd/net_types.h
+++ b/btpd/net_types.h
@@ -90,7 +90,8 @@ struct piece {
     unsigned nbusy;
     unsigned next_block;
 
-    struct block *blocks;
+    struct net_buf **eg_reqs;
+    struct block_request_tq reqs;
 
     const uint8_t *have_field;
     uint8_t *down_field;
@@ -98,15 +99,9 @@ struct piece {
     BTPDQ_ENTRY(piece) entry;
 };
 
-struct block {
-    struct piece *pc;
-    struct net_buf *msg;
-    struct block_request_tq reqs;
-};
-
 struct block_request {
     struct peer *p;
-    struct block *blk;
+    struct net_buf *msg;
     BTPDQ_ENTRY(block_request) p_entry;
     BTPDQ_ENTRY(block_request) blk_entry;
 };
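
With struct block gone, a request's REQUEST buffer is created on demand
in dl_new_request and reference counted: every block_request holds one
reference to its message (nb_hold when created, nb_drop when the request
is freed), and during endgame pc->eg_reqs[] keeps one extra reference per
block so requests sent to several peers can share a single buffer. A
simplified, self-contained sketch of that hold/drop discipline, assuming
nothing about btpd's real net_buf beyond the refcount; the buf type and
every name here are hypothetical:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define BLOCKLEN 16384                  /* usual BitTorrent block size */

struct buf {
    uint32_t index, begin, length;      /* REQUEST message fields */
    int refs;
};

static struct buf *
buf_create_request(uint32_t index, uint32_t begin, uint32_t length)
{
    struct buf *nb = calloc(1, sizeof(*nb));
    nb->index = index;
    nb->begin = begin;
    nb->length = length;
    return nb;                          /* born with zero references */
}

static void
buf_hold(struct buf *nb)                /* cf. nb_hold */
{
    nb->refs++;
}

static void
buf_drop(struct buf *nb)                /* cf. nb_drop: free on last ref */
{
    assert(nb->refs > 0);
    if (--nb->refs == 0)
        free(nb);
}

/* Lazy creation in the spirit of dl_new_request: the message is only
 * built when a request is actually handed to a peer; an endgame cache
 * entry (cf. pc->eg_reqs) may be passed in to share an existing buffer. */
static struct buf *
request_block(struct buf *cached, uint32_t piece, uint32_t block)
{
    struct buf *msg = cached != NULL ? cached
        : buf_create_request(piece, block * BLOCKLEN, BLOCKLEN);
    buf_hold(msg);                      /* this request owns one ref */
    return msg;
}

int
main(void)
{
    struct buf *msg = request_block(NULL, 7, 2); /* first: buffer built */
    struct buf *dup = request_block(msg, 7, 2);  /* endgame dup: shared */
    assert(dup == msg && msg->refs == 2);
    buf_drop(msg);                      /* one request unassigned */
    buf_drop(msg);                      /* last one done: buffer freed */
    return 0;
}

This is where the memory saving in the commit message comes from: before
the patch every busy piece carried a struct block plus a prebuilt request
buffer for each of its blocks, whether or not anything was ever requested;
afterwards buffers exist only while some request (or the endgame cache)
actually references them.
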
diff --git a/btpd/peer.c b/btpd/peer.c
index 8fbfcfd..c8716a1 100644
--- a/btpd/peer.c
+++ b/btpd/peer.c
@@ -160,15 +160,16 @@ peer_request(struct peer *p, struct block_request *req)
     assert(p->nreqs_out < MAXPIPEDREQUESTS);
     p->nreqs_out++;
     BTPDQ_INSERT_TAIL(&p->my_reqs, req, p_entry);
-    peer_send(p, req->blk->msg);
+    peer_send(p, req->msg);
 }
 
 int
-peer_requested(struct peer *p, struct block *blk)
+peer_requested(struct peer *p, uint32_t piece, uint32_t block)
 {
+    uint32_t begin = block * PIECE_BLOCKLEN;
     struct block_request *req;
     BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
-        if (req->blk == blk)
+        if (nb_get_index(req->msg) == piece && nb_get_begin(req->msg) == begin)
             return 1;
     return 0;
 }
@@ -182,7 +183,7 @@ peer_cancel(struct peer *p, struct block_request *req, struct net_buf *nb)
 {
     int removed = 0;
     struct nb_link *nl;
     BTPDQ_FOREACH(nl, &p->outq, entry) {
-        if (nl->nb == req->blk->msg) {
+        if (nl->nb == req->msg) {
             removed = peer_unsend(p, nl);
             break;
         }
     }
@@ -448,9 +449,9 @@ peer_on_piece(struct peer *p, uint32_t index, uint32_t begin,
 {
     struct block_request *req;
     BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
-        if ((nb_get_begin(req->blk->msg) == begin &&
-             nb_get_index(req->blk->msg) == index &&
-             nb_get_length(req->blk->msg) == length))
+        if ((nb_get_begin(req->msg) == begin &&
+             nb_get_index(req->msg) == index &&
+             nb_get_length(req->msg) == length))
             break;
     if (req != NULL) {
         btpd_log(BTPD_L_MSG, "received piece(%u,%u,%u) from %p\n",
diff --git a/btpd/peer.h b/btpd/peer.h
index afc5cf0..cdf1796 100644
--- a/btpd/peer.h
+++ b/btpd/peer.h
@@ -29,7 +29,7 @@ void peer_request(struct peer *p, struct block_request *req);
 void peer_cancel(struct peer *p, struct block_request *req,
     struct net_buf *nb);
 
-int peer_requested(struct peer *p, struct block *blk);
+int peer_requested(struct peer *p, uint32_t piece, uint32_t block);
 
 void peer_create_in(int sd);
 void peer_create_out(struct net *n, const uint8_t *id,
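
Finally, peer_requested can no longer compare struct block pointers, so a
block is identified by its piece index and block number, with the block
number translated to a byte offset (begin = block * PIECE_BLOCKLEN) and
matched against each queued request message; the download code above runs
the same mapping in reverse with blki = nb_get_begin(msg) / PIECE_BLOCKLEN.
A small stand-alone sketch of that lookup, assuming the usual 16 KiB block
size; the array stands in for the peer's my_reqs list and all names are
hypothetical:

#include <stdint.h>
#include <stdio.h>

#define BLOCKLEN 16384          /* assumed value of PIECE_BLOCKLEN */

struct req_msg {
    uint32_t index, begin;      /* fields a REQUEST message carries */
};

/* Return 1 if some outstanding request already covers (piece, block). */
static int
requested(const struct req_msg *reqs, int nreqs,
    uint32_t piece, uint32_t block)
{
    uint32_t begin = block * BLOCKLEN;  /* block number -> byte offset */
    for (int i = 0; i < nreqs; i++)
        if (reqs[i].index == piece && reqs[i].begin == begin)
            return 1;
    return 0;
}

int
main(void)
{
    struct req_msg out[] = { { 3, 0 }, { 3, 2 * BLOCKLEN } };
    printf("%d\n", requested(out, 2, 3, 2)); /* 1: block 2 is pending */
    printf("%d\n", requested(out, 2, 3, 1)); /* 0: block 1 is not */
    return 0;
}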