/root/bitcoin/src/txdb.cpp
Line | Count | Source |
1 | | // Copyright (c) 2009-2010 Satoshi Nakamoto |
2 | | // Copyright (c) 2009-present The Bitcoin Core developers |
3 | | // Distributed under the MIT software license, see the accompanying |
4 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
5 | | |
6 | | #include <txdb.h> |
7 | | |
8 | | #include <coins.h> |
9 | | #include <dbwrapper.h> |
10 | | #include <logging/timer.h> |
11 | | #include <primitives/transaction.h> |
12 | | #include <random.h> |
13 | | #include <serialize.h> |
14 | | #include <uint256.h> |
15 | | #include <util/log.h> |
16 | | #include <util/vector.h> |
17 | | |
18 | | #include <cassert> |
19 | | #include <cstdlib> |
20 | | #include <iterator> |
21 | | #include <utility> |
22 | | |
// Single-byte key prefixes for records in the coins database.
// DB_COIN keys are the prefix followed by the serialized outpoint (see CoinEntry).
static constexpr uint8_t DB_COIN{'C'};
// DB_BEST_BLOCK stores the hash of the block the database is consistent with.
static constexpr uint8_t DB_BEST_BLOCK{'B'};
// DB_HEAD_BLOCKS stores [new_tip, old_tip] while a flush is in progress
// (written by BatchWrite; its presence signals an interrupted flush).
static constexpr uint8_t DB_HEAD_BLOCKS{'H'};
// Keys used in previous version that might still be found in the DB:
static constexpr uint8_t DB_COINS{'c'};

// Threshold for warning when writing this many dirty cache entries to disk.
static constexpr size_t WARN_FLUSH_COINS_COUNT{10'000'000};
31 | | |
32 | | bool CCoinsViewDB::NeedsUpgrade() |
33 | 0 | { |
34 | 0 | std::unique_ptr<CDBIterator> cursor{m_db->NewIterator()}; |
35 | | // DB_COINS was deprecated in v0.15.0, commit |
36 | | // 1088b02f0ccd7358d2b7076bb9e122d59d502d02 |
37 | 0 | cursor->Seek(std::make_pair(DB_COINS, uint256{})); |
38 | 0 | return cursor->Valid(); |
39 | 0 | } |
40 | | |
namespace {

//! Serialization adapter that turns a COutPoint into a coins-database key:
//! the DB_COIN prefix byte, the txid, then the output index as a VARINT
//! (keeps keys compact for small indices).
struct CoinEntry {
    COutPoint* outpoint; // Non-owning pointer to the caller's outpoint.
    uint8_t key{DB_COIN};
    // const_cast lets one adapter serve both directions: serialization only
    // reads through the pointer; deserialization callers pass a mutable object.
    explicit CoinEntry(const COutPoint* ptr) : outpoint(const_cast<COutPoint*>(ptr)) {}

    SERIALIZE_METHODS(CoinEntry, obj) { READWRITE(obj.key, obj.outpoint->hash, VARINT(obj.outpoint->n)); }
};

} // namespace
52 | | |
// Opens (or creates) the on-disk coins database described by db_params.
// The parameters are stored so ResizeCache() can re-open the DB later
// with a different cache size.
CCoinsViewDB::CCoinsViewDB(DBParams db_params, CoinsViewOptions options) :
    m_db_params{std::move(db_params)},
    m_options{std::move(options)},
    m_db{std::make_unique<CDBWrapper>(m_db_params)} { }
57 | | |
58 | | void CCoinsViewDB::ResizeCache(size_t new_cache_size) |
59 | 0 | { |
60 | | // We can't do this operation with an in-memory DB since we'll lose all the coins upon |
61 | | // reset. |
62 | 0 | if (!m_db_params.memory_only) { |
63 | | // Have to do a reset first to get the original `m_db` state to release its |
64 | | // filesystem lock. |
65 | 0 | m_db.reset(); |
66 | 0 | m_db_params.cache_bytes = new_cache_size; |
67 | 0 | m_db_params.wipe_data = false; |
68 | 0 | m_db = std::make_unique<CDBWrapper>(m_db_params); |
69 | 0 | } |
70 | 0 | } |
71 | | |
72 | | std::optional<Coin> CCoinsViewDB::GetCoin(const COutPoint& outpoint) const |
73 | 0 | { |
74 | 0 | if (Coin coin; m_db->Read(CoinEntry(&outpoint), coin)) { |
75 | 0 | Assert(!coin.IsSpent()); // The UTXO database should never contain spent coins Line | Count | Source | 113 | 0 | #define Assert(val) inline_assertion_check<true>(val, std::source_location::current(), #val) |
|
76 | 0 | return coin; |
77 | 0 | } |
78 | 0 | return std::nullopt; |
79 | 0 | } |
80 | | |
81 | 0 | bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const { |
82 | 0 | return m_db->Exists(CoinEntry(&outpoint)); |
83 | 0 | } |
84 | | |
85 | 0 | uint256 CCoinsViewDB::GetBestBlock() const { |
86 | 0 | uint256 hashBestChain; |
87 | 0 | if (!m_db->Read(DB_BEST_BLOCK, hashBestChain)) |
88 | 0 | return uint256(); |
89 | 0 | return hashBestChain; |
90 | 0 | } |
91 | | |
92 | 0 | std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const { |
93 | 0 | std::vector<uint256> vhashHeadBlocks; |
94 | 0 | if (!m_db->Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) { |
95 | 0 | return std::vector<uint256>(); |
96 | 0 | } |
97 | 0 | return vhashHeadBlocks; |
98 | 0 | } |
99 | | |
// Flushes the dirty entries of a coins-cache cursor to disk in (possibly
// multiple) batches, in a crash-recoverable way:
//   1. erase DB_BEST_BLOCK and write DB_HEAD_BLOCKS = [hashBlock, old_tip]
//      so an interrupted flush is detectable on restart;
//   2. write/erase the dirty coin entries, committing partial batches once
//      they exceed m_options.batch_write_bytes;
//   3. in the final batch, erase DB_HEAD_BLOCKS and restore DB_BEST_BLOCK,
//      marking the database consistent with hashBlock again.
void CCoinsViewDB::BatchWrite(CoinsViewCacheCursor& cursor, const uint256& hashBlock)
{
    CDBBatch batch(*m_db);
    size_t count = 0; // Total entries visited (dirty or not), for the final log line.
    const size_t dirty_count{cursor.GetDirtyCount()};
    assert(!hashBlock.IsNull());

    uint256 old_tip = GetBestBlock();
    if (old_tip.IsNull()) {
        // We may be in the middle of replaying: a prior flush was interrupted
        // after step 1, so DB_BEST_BLOCK is gone but DB_HEAD_BLOCKS remains.
        std::vector<uint256> old_heads = GetHeadBlocks();
        if (old_heads.size() == 2) {
            // The replay must target the same block the interrupted flush did.
            if (old_heads[0] != hashBlock) {
                LogError("The coins database detected an inconsistent state, likely due to a previous crash or shutdown. You will need to restart bitcoind with the -reindex-chainstate or -reindex configuration option.\n");
            }
            assert(old_heads[0] == hashBlock);
            old_tip = old_heads[1];
        }
    }

    if (dirty_count > WARN_FLUSH_COINS_COUNT) LogWarning("Flushing large (%d entries) UTXO set to disk, it may take several minutes", dirty_count);
    LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d out of %d cached coins)",
        dirty_count, cursor.GetTotalCount()), BCLog::BENCH);

    // In the first batch, mark the database as being in the middle of a
    // transition from old_tip to hashBlock.
    // A vector is used for future extensibility, as we may want to support
    // interrupting after partial writes from multiple independent reorgs.
    batch.Erase(DB_BEST_BLOCK);
    batch.Write(DB_HEAD_BLOCKS, Vector(hashBlock, old_tip));

    for (auto it{cursor.Begin()}; it != cursor.End();) {
        if (it->second.IsDirty()) {
            CoinEntry entry(&it->first);
            // A spent coin is removed from the DB; an unspent one is (re)written.
            if (it->second.coin.IsSpent()) {
                batch.Erase(entry);
            } else {
                batch.Write(entry, it->second.coin);
            }
        }
        count++;
        // Advance before the cursor may free the current entry.
        it = cursor.NextAndMaybeErase(*it);
        if (batch.ApproximateSize() > m_options.batch_write_bytes) {
            // Commit a partial batch to bound memory usage of the batch itself.
            LogDebug(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.ApproximateSize() * (1.0 / 1048576.0));
            m_db->WriteBatch(batch);
            batch.Clear();
            if (m_options.simulate_crash_ratio) {
                // Test hook: randomly terminate mid-flush to exercise the
                // DB_HEAD_BLOCKS recovery path.
                static FastRandomContext rng;
                if (rng.randrange(m_options.simulate_crash_ratio) == 0) {
                    LogError("Simulating a crash. Goodbye.");
                    _Exit(0);
                }
            }
        }
    }

    // In the last batch, mark the database as consistent with hashBlock again.
    batch.Erase(DB_HEAD_BLOCKS);
    batch.Write(DB_BEST_BLOCK, hashBlock);

    LogDebug(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.ApproximateSize() * (1.0 / 1048576.0));
    m_db->WriteBatch(batch);
    LogDebug(BCLog::COINDB, "Committed %u changed transaction outputs (out of %u) to coin database...", (unsigned int)dirty_count, (unsigned int)count);
}
165 | | |
// Estimates the on-disk size of all coin records by asking the backing DB
// for the size of the half-open key range [DB_COIN, DB_COIN + 1).
size_t CCoinsViewDB::EstimateSize() const
{
    return m_db->EstimateSize(DB_COIN, uint8_t(DB_COIN + 1));
}
170 | | |
/** Specialization of CCoinsViewCursor to iterate over a CCoinsViewDB */
class CCoinsViewDBCursor: public CCoinsViewCursor
{
public:
    // Prefer using CCoinsViewDB::Cursor() since we want to perform some
    // cache warmup on instantiation.
    CCoinsViewDBCursor(CDBIterator* pcursorIn, const uint256&hashBlockIn):
        CCoinsViewCursor(hashBlockIn), pcursor(pcursorIn) {}
    ~CCoinsViewDBCursor() = default;

    bool GetKey(COutPoint &key) const override;
    bool GetValue(Coin &coin) const override;

    bool Valid() const override;
    void Next() override;

private:
    std::unique_ptr<CDBIterator> pcursor; // Owns the underlying DB iterator.
    // Cached key of the current record: .first is the key prefix (DB_COIN
    // while valid, 0 once past the coin range), .second the decoded outpoint.
    std::pair<char, COutPoint> keyTmp;

    // CCoinsViewDB::Cursor() seeds pcursor/keyTmp directly.
    friend class CCoinsViewDB;
};
193 | | |
194 | | std::unique_ptr<CCoinsViewCursor> CCoinsViewDB::Cursor() const |
195 | 0 | { |
196 | 0 | auto i = std::make_unique<CCoinsViewDBCursor>( |
197 | 0 | const_cast<CDBWrapper&>(*m_db).NewIterator(), GetBestBlock()); |
198 | | /* It seems that there are no "const iterators" for LevelDB. Since we |
199 | | only need read operations on it, use a const-cast to get around |
200 | | that restriction. */ |
201 | 0 | i->pcursor->Seek(DB_COIN); |
202 | | // Cache key of first record |
203 | 0 | if (i->pcursor->Valid()) { |
204 | 0 | CoinEntry entry(&i->keyTmp.second); |
205 | 0 | i->pcursor->GetKey(entry); |
206 | 0 | i->keyTmp.first = entry.key; |
207 | 0 | } else { |
208 | 0 | i->keyTmp.first = 0; // Make sure Valid() and GetKey() return false |
209 | 0 | } |
210 | 0 | return i; |
211 | 0 | } |
212 | | |
213 | | bool CCoinsViewDBCursor::GetKey(COutPoint &key) const |
214 | 0 | { |
215 | | // Return cached key |
216 | 0 | if (keyTmp.first == DB_COIN) { |
217 | 0 | key = keyTmp.second; |
218 | 0 | return true; |
219 | 0 | } |
220 | 0 | return false; |
221 | 0 | } |
222 | | |
// Decodes the current record's value into `coin`; forwards to the DB iterator.
bool CCoinsViewDBCursor::GetValue(Coin &coin) const
{
    return pcursor->GetValue(coin);
}
227 | | |
// The cursor is valid while the cached key prefix is still DB_COIN
// (Next() sets it to 0 once iteration leaves the coin key range).
bool CCoinsViewDBCursor::Valid() const
{
    return keyTmp.first == DB_COIN;
}
232 | | |
233 | | void CCoinsViewDBCursor::Next() |
234 | 0 | { |
235 | 0 | pcursor->Next(); |
236 | 0 | CoinEntry entry(&keyTmp.second); |
237 | 0 | if (!pcursor->Valid() || !pcursor->GetKey(entry)) { |
238 | 0 | keyTmp.first = 0; // Invalidate cached key after last record so that Valid() and GetKey() return false |
239 | 0 | } else { |
240 | 0 | keyTmp.first = entry.key; |
241 | 0 | } |
242 | 0 | } |