src: fix rpc limit
Co-authored-by: jeffro256 <jeffro256@tutanota.com>
commit 32ccf2abcc
parent 893916ad09
@@ -1724,9 +1724,9 @@ namespace cryptonote
     return true;
   }
   //-----------------------------------------------------------------------------------------------
-  bool core::get_pool_transactions_info(const std::vector<crypto::hash>& txids, std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>>& txs, bool include_sensitive_txes) const
+  bool core::get_pool_transactions_info(const std::vector<crypto::hash>& txids, std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>>& txs, bool include_sensitive_txes, size_t limit_size) const
   {
-    return m_mempool.get_transactions_info(txids, txs, include_sensitive_txes);
+    return m_mempool.get_transactions_info(epee::to_span(txids), txs, include_sensitive_txes, limit_size);
   }
   //-----------------------------------------------------------------------------------------------
   bool core::get_pool_transactions(std::vector<transaction>& txs, bool include_sensitive_data) const
@@ -1741,9 +1741,9 @@ namespace cryptonote
     return true;
   }
   //-----------------------------------------------------------------------------------------------
-  bool core::get_pool_info(time_t start_time, bool include_sensitive_txes, size_t max_tx_count, std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>>& added_txs, std::vector<crypto::hash>& remaining_added_txids, std::vector<crypto::hash>& removed_txs, bool& incremental) const
+  bool core::get_pool_info(time_t start_time, bool include_sensitive_txes, size_t max_tx_count, std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>>& added_txs, std::vector<crypto::hash>& remaining_added_txids, std::vector<crypto::hash>& removed_txs, bool& incremental, size_t limit_size) const
   {
-    return m_mempool.get_pool_info(start_time, include_sensitive_txes, max_tx_count, added_txs, remaining_added_txids, removed_txs, incremental);
+    return m_mempool.get_pool_info(start_time, include_sensitive_txes, max_tx_count, added_txs, remaining_added_txids, removed_txs, incremental, limit_size);
   }
   //-----------------------------------------------------------------------------------------------
   bool core::get_pool_transaction_stats(struct txpool_stats& stats, bool include_sensitive_data) const
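The mempool API switches from const std::vector<crypto::hash>& to epee::span<const crypto::hash> so callers can hand over a prefix of a txid vector without copying (get_pool_info below does exactly that with {&txids[0], max_full_tx_fetch_count}); core::get_pool_transactions_info simply wraps its whole vector with epee::to_span. A rough equivalent of the idea using C++20 std::span instead of the epee type, purely for illustration and not part of the commit:

#include <cstddef>
#include <iostream>
#include <span>
#include <string>
#include <vector>

// Stand-in for the real API: takes a non-owning view, so both a whole vector
// and a prefix of it can be passed without building a temporary vector.
static void print_ids(std::span<const std::string> ids)
{
  for (const auto &id : ids)
    std::cout << id << "\n";
}

int main()
{
  const std::vector<std::string> txids{"aa", "bb", "cc", "dd"};

  print_ids(txids);             // whole vector (what epee::to_span(txids) does)
  print_ids({txids.data(), 2}); // first two only (what the prefix span in get_pool_info does)
}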
@@ -514,7 +514,7 @@ namespace cryptonote
      *
      * @note see tx_memory_pool::get_pool_transactions_info
      */
-    bool get_pool_transactions_info(const std::vector<crypto::hash>& txids, std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>>& txs, bool include_sensitive_txes = false) const;
+    bool get_pool_transactions_info(const std::vector<crypto::hash>& txids, std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>>& txs, bool include_sensitive_txes = false, size_t limit_size = 0) const;
 
     /**
      * @copydoc tx_memory_pool::get_pool_info
@@ -523,7 +523,7 @@ namespace cryptonote
      *
      * @note see tx_memory_pool::get_pool_info
      */
-    bool get_pool_info(time_t start_time, bool include_sensitive_txes, size_t max_tx_count, std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>>& added_txs, std::vector<crypto::hash>& remaining_added_txids, std::vector<crypto::hash>& removed_txs, bool& incremental) const;
+    bool get_pool_info(time_t start_time, bool include_sensitive_txes, size_t max_tx_count, std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>>& added_txs, std::vector<crypto::hash>& remaining_added_txids, std::vector<crypto::hash>& removed_txs, bool& incremental, size_t limit_size = 0) const;
 
     /**
      * @copydoc tx_memory_pool::get_transactions
@@ -694,19 +694,23 @@ namespace cryptonote
     return true;
   }
   //------------------------------------------------------------------
-  bool tx_memory_pool::get_transactions_info(const std::vector<crypto::hash>& txids, std::vector<std::pair<crypto::hash, tx_details>>& txs, bool include_sensitive) const
+  bool tx_memory_pool::get_transactions_info(const epee::span<const crypto::hash> txids, std::vector<std::pair<crypto::hash, tx_details>>& txs, bool include_sensitive, size_t cumul_txblob_size_limit) const
   {
     CRITICAL_REGION_LOCAL(m_transactions_lock);
     CRITICAL_REGION_LOCAL1(m_blockchain);
 
     txs.clear();
     txs.reserve(std::min<size_t>(txids.size(), 1000000)); // reserve limited to 1 million
-    for (const auto &it: txids)
+    size_t cumul_txblob_size = 0;
+
+    for (size_t i = 0; i < txids.size(); ++i)
     {
+      const crypto::hash &it{txids[i]};
       tx_details details;
       bool success = get_transaction_info(it, details, include_sensitive, true/*include_blob*/);
-      if (success)
+      if (success && (!cumul_txblob_size_limit || (cumul_txblob_size + details.blob_size < cumul_txblob_size_limit)))
       {
+        cumul_txblob_size += details.blob_size;
         txs.push_back(std::make_pair(it, std::move(details)));
       }
     }
@@ -985,7 +989,7 @@ namespace cryptonote
     }, false, category);
   }
   //------------------------------------------------------------------
-  bool tx_memory_pool::get_pool_info(time_t start_time, bool include_sensitive, size_t max_tx_count, std::vector<std::pair<crypto::hash, tx_details>>& added_txs, std::vector<crypto::hash>& remaining_added_txids, std::vector<crypto::hash>& removed_txs, bool& incremental) const
+  bool tx_memory_pool::get_pool_info(time_t start_time, bool include_sensitive, size_t max_tx_count, std::vector<std::pair<crypto::hash, tx_details>>& added_txs, std::vector<crypto::hash>& remaining_added_txids, std::vector<crypto::hash>& removed_txs, bool& incremental, size_t cumul_limit_size) const
   {
     CRITICAL_REGION_LOCAL(m_transactions_lock);
     CRITICAL_REGION_LOCAL1(m_blockchain);
@@ -1016,38 +1020,14 @@ namespace cryptonote
     remaining_added_txids.clear();
     removed_txs.clear();
 
-    std::vector<crypto::hash> txids;
-    if (!incremental)
-    {
-      LOG_PRINT_L2("Giving back the whole pool");
-      // Give back the whole pool in 'added_txs'; because calling 'get_transaction_info' right inside the
-      // anonymous method somehow results in an LMDB error with transactions we have to build a list of
-      // ids first and get the full info afterwards
-      get_transaction_hashes(txids, include_sensitive);
-      if (txids.size() > max_tx_count)
-      {
-        remaining_added_txids = std::vector<crypto::hash>(txids.begin() + max_tx_count, txids.end());
-        txids.erase(txids.begin() + max_tx_count, txids.end());
-      }
-      get_transactions_info(txids, added_txs, include_sensitive);
-      return true;
-    }
-
-    // Give back incrementally, based on time of entry into the map
-    for (const auto &pit : m_added_txs_by_id)
+    // If incremental, handle removed TXIDs first since it's important that txs are removed
+    // from synchronizers' pools, and we need to estimate how much space we have left to
+    // request full-bodied txs
+    if (incremental)
     {
-      if (pit.second >= start_time)
-        txids.push_back(pit.first);
-    }
-    get_transactions_info(txids, added_txs, include_sensitive);
-    if (added_txs.size() > max_tx_count)
-    {
-      remaining_added_txids.reserve(added_txs.size() - max_tx_count);
-      for (size_t i = max_tx_count; i < added_txs.size(); ++i)
-        remaining_added_txids.push_back(added_txs[i].first);
-      added_txs.erase(added_txs.begin() + max_tx_count, added_txs.end());
-    }
-
     std::multimap<time_t, removed_tx_info>::const_iterator rit = m_removed_txs_by_time.lower_bound(start_time);
     while (rit != m_removed_txs_by_time.end())
     {
@@ -1057,6 +1037,63 @@ namespace cryptonote
       }
       ++rit;
     }
+    }
+
+    // Build a list of TXIDs that we want information for eventually. When `!incremental`, we might
+    // collect TXIDs that have already left the pool which will cause a followup RPC call that
+    // fetches non-existent txs, but that's *okay* since a non-incremental caller probably doesn't
+    // care about being incredibly efficient. This problem is also unavoidable anyways if we need to
+    // ever return anything inside `remaining_added_txids`, since those txs might have left the mempool
+    // by the time the followup RPC call reaches the daemon
+    std::vector<crypto::hash> txids;
+    txids.reserve(m_added_txs_by_id.size());
+    for (const auto &pit : m_added_txs_by_id)
+    {
+      const bool relevant_txid{!incremental || pit.second >= start_time};
+      if (relevant_txid)
+        txids.push_back(pit.first);
+    }
+
+    // Estimate max cumulative size left for full tx blobs
+    const size_t removed_txids_clawback{32 * removed_txs.size()};
+    const size_t remaining_txids_clawback{32 * txids.size()};
+    const size_t added_tx_txid_clawback(32 * txids.size());
+    const size_t total_clawback{removed_txids_clawback + remaining_txids_clawback + added_tx_txid_clawback};
+    const size_t cumulative_txblob_size_limit{cumul_limit_size > total_clawback ? cumul_limit_size - total_clawback : 0};
+
+    // Perform TX info fetch, limited to max_tx_count and cumulative_txblob_size_limit
+    const size_t max_full_tx_fetch_count{std::min(txids.size(), max_tx_count)};
+    if (cumulative_txblob_size_limit && max_full_tx_fetch_count)
+    {
+      const epee::span<const crypto::hash> txid_fetch_span{&txids[0], max_full_tx_fetch_count};
+      if (!get_transactions_info(txid_fetch_span, added_txs, include_sensitive, cumulative_txblob_size_limit))
+        return false;
+    }
+
+    // Populate `remaining_added_txids` with all TXIDs in `txids` and not in `added_txs`
+    if (added_txs.size() < txids.size())
+    {
+      const size_t num_remaining{added_txs.size() - max_tx_count};
+      remaining_added_txids.reserve(num_remaining);
+
+      // This iteration code assumes that the get_transactions_info() method A) returns elements in
+      // the same order as they are passed in (skipping failures) and B) does not return TXIDs which
+      // don't exist in the input. If these assumptions don't hold then remaining_added_txids will
+      // be wrong, but it shouldn't cause any undefined behavior outside of that and should terminate
+      // in a short finite time period O(N) with N = txids.size()
+      auto txid_it = txids.cbegin();
+      auto added_it = added_txs.cbegin();
+      while (txid_it != txids.cend())
+      {
+        if (added_it != added_txs.cend() && added_it->first == *txid_it)
+          ++added_it;
+        else
+          remaining_added_txids.push_back(*txid_it);
+
+        ++txid_it;
+      }
+    }
+
     return true;
   }
   //------------------------------------------------------------------
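The heart of the fix is the size-budget ("clawback") arithmetic in the new get_pool_info: every hash that will be returned (removed txids, remaining txids, and the txid attached to each returned tx) is assumed to cost about 32 bytes, that total is subtracted from the caller-supplied byte limit, and only the remainder may be spent on full tx blobs. A minimal standalone sketch of that computation, with illustrative names and values that are not part of the commit:

#include <cstddef>
#include <iostream>

// Illustrative only: mirrors the budget computation in tx_memory_pool::get_pool_info.
// Each returned hash costs roughly 32 bytes on the wire, so reserve ("claw back") that
// much per hash before spending the rest of the byte budget on full tx blobs.
static std::size_t txblob_budget(std::size_t total_byte_limit, std::size_t n_removed_txids, std::size_t n_candidate_txids)
{
  const std::size_t removed_txids_clawback   = 32 * n_removed_txids;
  const std::size_t remaining_txids_clawback = 32 * n_candidate_txids;
  const std::size_t added_tx_txid_clawback   = 32 * n_candidate_txids;
  const std::size_t total_clawback = removed_txids_clawback + remaining_txids_clawback + added_tx_txid_clawback;
  return total_byte_limit > total_clawback ? total_byte_limit - total_clawback : 0;
}

int main()
{
  // e.g. a 1 MiB budget with 100 removed and 2000 candidate txids leaves
  // 1048576 - 32*(100 + 2000 + 2000) = 917376 bytes for full tx blobs
  std::cout << txblob_budget(1 << 20, 100, 2000) << "\n";
}

Note that a limit of 0 means "unlimited" to get_transactions_info, which is why get_pool_info skips the fetch entirely when the remaining budget works out to 0 rather than passing it through.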
@@ -458,14 +458,14 @@ namespace cryptonote
     };
 
     /**
-     * @brief get infornation about a single transaction
+     * @brief get information about a single transaction
      */
     bool get_transaction_info(const crypto::hash &txid, tx_details &td, bool include_sensitive_data, bool include_blob = false) const;
 
     /**
      * @brief get information about multiple transactions
      */
-    bool get_transactions_info(const std::vector<crypto::hash>& txids, std::vector<std::pair<crypto::hash, tx_details>>& txs, bool include_sensitive_data = false) const;
+    bool get_transactions_info(const epee::span<const crypto::hash> txids, std::vector<std::pair<crypto::hash, tx_details>>& txs, bool include_sensitive_data = false, size_t cumul_txblob_size_limit = 0) const;
 
     /**
      * @brief get transactions not in the passed set
@@ -477,7 +477,7 @@ namespace cryptonote
      *
      * @return true on success, false on error
      */
-    bool get_pool_info(time_t start_time, bool include_sensitive, size_t max_tx_count, std::vector<std::pair<crypto::hash, tx_details>>& added_txs, std::vector<crypto::hash>& remaining_added_txids, std::vector<crypto::hash>& removed_txs, bool& incremental) const;
+    bool get_pool_info(time_t start_time, bool include_sensitive, size_t max_tx_count, std::vector<std::pair<crypto::hash, tx_details>>& added_txs, std::vector<crypto::hash>& remaining_added_txids, std::vector<crypto::hash>& removed_txs, bool& incremental, size_t cumul_limit_size = 0) const;
 
   private:
@@ -652,53 +652,7 @@ namespace cryptonote
 
     res.pool_info_extent = COMMAND_RPC_GET_BLOCKS_FAST::NONE;
 
-    if (get_pool)
-    {
-      const bool restricted = m_restricted && ctx;
-      const bool request_has_rpc_origin = ctx != NULL;
-      const bool allow_sensitive = !request_has_rpc_origin || !restricted;
-      const size_t max_tx_count = restricted ? RESTRICTED_TRANSACTIONS_COUNT : std::numeric_limits<size_t>::max();
-
-      bool incremental;
-      std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>> added_pool_txs;
-      bool success = m_core.get_pool_info((time_t)req.pool_info_since, allow_sensitive, max_tx_count, added_pool_txs, res.remaining_added_pool_txids, res.removed_pool_txids, incremental);
-      if (success)
-      {
-        res.added_pool_txs.clear();
-        if (m_rpc_payment)
-        {
-          CHECK_PAYMENT_SAME_TS(req, res, added_pool_txs.size() * COST_PER_TX + (res.remaining_added_pool_txids.size() + res.removed_pool_txids.size()) * COST_PER_POOL_HASH);
-        }
-        for (const auto &added_pool_tx: added_pool_txs)
-        {
-          COMMAND_RPC_GET_BLOCKS_FAST::pool_tx_info info;
-          info.tx_hash = added_pool_tx.first;
-          std::stringstream oss;
-          binary_archive<true> ar(oss);
-          bool r = req.prune
-            ? const_cast<cryptonote::transaction&>(added_pool_tx.second.tx).serialize_base(ar)
-            : ::serialization::serialize(ar, const_cast<cryptonote::transaction&>(added_pool_tx.second.tx));
-          if (!r)
-          {
-            res.status = "Failed to serialize transaction";
-            return true;
-          }
-          info.tx_blob = oss.str();
-          info.double_spend_seen = added_pool_tx.second.double_spend_seen;
-          res.added_pool_txs.push_back(std::move(info));
-        }
-      }
-      if (success)
-      {
-        res.pool_info_extent = incremental ? COMMAND_RPC_GET_BLOCKS_FAST::INCREMENTAL : COMMAND_RPC_GET_BLOCKS_FAST::FULL;
-      }
-      else
-      {
-        res.status = "Failed to get pool info";
-        return true;
-      }
-    }
-
+    size_t cumul_block_data_size = 0;
     if (get_blocks)
     {
       // quick check for noop
@@ -748,7 +702,7 @@ namespace cryptonote
 
       CHECK_PAYMENT_SAME_TS(req, res, bs.size() * COST_PER_BLOCK);
 
-      size_t size = 0, ntxes = 0;
+      size_t ntxes = 0;
       res.blocks.reserve(bs.size());
       res.output_indices.reserve(bs.size());
       for(auto& bd: bs)
@@ -756,7 +710,7 @@ namespace cryptonote
         res.blocks.resize(res.blocks.size()+1);
         res.blocks.back().pruned = req.prune;
         res.blocks.back().block = bd.first.first;
-        size += bd.first.first.size();
+        cumul_block_data_size += bd.first.first.size();
         res.output_indices.push_back(COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices());
         ntxes += bd.second.size();
         res.output_indices.back().indices.reserve(1 + bd.second.size());
@@ -768,7 +722,7 @@ namespace cryptonote
           res.blocks.back().txs.push_back({std::move(i->second), crypto::null_hash});
           i->second.clear();
           i->second.shrink_to_fit();
-          size += res.blocks.back().txs.back().blob.size();
+          cumul_block_data_size += res.blocks.back().txs.back().blob.size();
         }
 
         const size_t n_txes_to_lookup = bd.second.size() + (req.no_miner_tx ? 0 : 1);
@@ -790,7 +744,54 @@ namespace cryptonote
           res.output_indices.back().indices.push_back({std::move(indices[i])});
         }
       }
-      MDEBUG("on_get_blocks: " << bs.size() << " blocks, " << ntxes << " txes, size " << size);
+      MDEBUG("on_get_blocks: " << bs.size() << " blocks, " << ntxes << " txes, size " << cumul_block_data_size);
     }
 
+    if (get_pool)
+    {
+      const bool restricted = m_restricted && ctx;
+      const bool request_has_rpc_origin = ctx != NULL;
+      const bool allow_sensitive = !request_has_rpc_origin || !restricted;
+      const size_t max_tx_count = restricted ? RESTRICTED_TRANSACTIONS_COUNT : std::numeric_limits<size_t>::max();
+
+      bool incremental;
+      std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>> added_pool_txs;
+      bool success = m_core.get_pool_info((time_t)req.pool_info_since, allow_sensitive, max_tx_count, added_pool_txs, res.remaining_added_pool_txids, res.removed_pool_txids, incremental, (LEVIN_DEFAULT_MAX_PACKET_SIZE * 0.9) - cumul_block_data_size);
+      if (success)
+      {
+        res.added_pool_txs.clear();
+        if (m_rpc_payment)
+        {
+          CHECK_PAYMENT_SAME_TS(req, res, added_pool_txs.size() * COST_PER_TX + (res.remaining_added_pool_txids.size() + res.removed_pool_txids.size()) * COST_PER_POOL_HASH);
+        }
+        for (const auto &added_pool_tx: added_pool_txs)
+        {
+          COMMAND_RPC_GET_BLOCKS_FAST::pool_tx_info info;
+          info.tx_hash = added_pool_tx.first;
+          std::stringstream oss;
+          binary_archive<true> ar(oss);
+          bool r = req.prune
+            ? const_cast<cryptonote::transaction&>(added_pool_tx.second.tx).serialize_base(ar)
+            : ::serialization::serialize(ar, const_cast<cryptonote::transaction&>(added_pool_tx.second.tx));
+          if (!r)
+          {
+            res.status = "Failed to serialize transaction";
+            return true;
+          }
+          info.tx_blob = oss.str();
+          info.double_spend_seen = added_pool_tx.second.double_spend_seen;
+          res.added_pool_txs.push_back(std::move(info));
+        }
+      }
+      if (success)
+      {
+        res.pool_info_extent = incremental ? COMMAND_RPC_GET_BLOCKS_FAST::INCREMENTAL : COMMAND_RPC_GET_BLOCKS_FAST::FULL;
+      }
+      else
+      {
+        res.status = "Failed to get pool info";
+        return true;
+      }
+    }
+
     res.status = CORE_RPC_STATUS_OK;
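On the RPC side, the get_blocks handler now moves the pool section after the block section and budgets it against the Levin packet limit: roughly 90% of LEVIN_DEFAULT_MAX_PACKET_SIZE, minus the bytes already spent on block data, is passed to core::get_pool_info as limit_size. A back-of-the-envelope sketch of that budgeting; the 100 MB packet size is an assumption here, and the clamp to zero is extra caution in the sketch rather than something the handler does:

#include <cstddef>
#include <cstdint>
#include <iostream>

int main()
{
  // Illustrative numbers only: LEVIN_DEFAULT_MAX_PACKET_SIZE is assumed to be 100 MB here.
  const std::uint64_t max_packet_size = 100000000;

  // Bytes already consumed by serialized blocks/txs in this reply (tracked as
  // cumul_block_data_size in the handler).
  const std::uint64_t cumul_block_data_size = 12 * 1000 * 1000; // e.g. 12 MB of block data

  // Keep ~10% headroom for the rest of the response, then hand the remainder to
  // core::get_pool_info() as the byte limit for pool transactions.
  const double budget = max_packet_size * 0.9 - cumul_block_data_size;
  const std::size_t pool_limit_size = budget > 0 ? static_cast<std::size_t>(budget) : 0;

  std::cout << "pool byte budget: " << pool_limit_size << "\n"; // 78000000 with these numbers
}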