Bitcoin Core Fuzz Coverage Report

Coverage Report

Created: 2026-03-24 13:57

Keyboard shortcuts: jump to next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/root/bitcoin/src/index/base.cpp
Line
Count
Source
1
// Copyright (c) 2017-present The Bitcoin Core developers
2
// Distributed under the MIT software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <index/base.h>
6
7
#include <chain.h>
8
#include <common/args.h>
9
#include <dbwrapper.h>
10
#include <interfaces/chain.h>
11
#include <interfaces/types.h>
12
#include <kernel/types.h>
13
#include <node/abort.h>
14
#include <node/blockstorage.h>
15
#include <node/context.h>
16
#include <node/database_args.h>
17
#include <node/interface_ui.h>
18
#include <primitives/block.h>
19
#include <sync.h>
20
#include <tinyformat.h>
21
#include <uint256.h>
22
#include <undo.h>
23
#include <util/fs.h>
24
#include <util/log.h>
25
#include <util/string.h>
26
#include <util/thread.h>
27
#include <util/threadinterrupt.h>
28
#include <util/time.h>
29
#include <util/translation.h>
30
#include <validation.h>
31
#include <validationinterface.h>
32
33
#include <cassert>
34
#include <compare>
35
#include <cstdint>
36
#include <memory>
37
#include <optional>
38
#include <span>
39
#include <stdexcept>
40
#include <string>
41
#include <thread>
42
#include <utility>
43
#include <vector>
44
45
using kernel::ChainstateRole;
46
47
constexpr uint8_t DB_BEST_BLOCK{'B'};
48
49
constexpr auto SYNC_LOG_INTERVAL{30s};
50
constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s};
51
52
template <typename... Args>
53
void BaseIndex::FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
54
0
{
55
0
    auto message = tfm::format(fmt, args...);
56
0
    node::AbortNode(m_chain->context()->shutdown_request, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get());
57
0
}
Unexecuted instantiation: void BaseIndex::FatalErrorf<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >(util::ConstevalFormatString<sizeof...(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >)>, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)
Unexecuted instantiation: void BaseIndex::FatalErrorf<int>(util::ConstevalFormatString<sizeof...(int)>, int const&)
58
59
CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
60
0
{
61
0
    CBlockLocator locator;
62
0
    bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator));
63
0
    assert(found);
64
0
    assert(!locator.IsNull());
65
0
    return locator;
66
0
}
67
68
BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
69
0
    CDBWrapper{DBParams{
70
0
        .path = path,
71
0
        .cache_bytes = n_cache_size,
72
0
        .memory_only = f_memory,
73
0
        .wipe_data = f_wipe,
74
0
        .obfuscate = f_obfuscate,
75
0
        .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}}
76
0
{}
77
78
CBlockLocator BaseIndex::DB::ReadBestBlock() const
79
0
{
80
0
    CBlockLocator locator;
81
82
0
    bool success = Read(DB_BEST_BLOCK, locator);
83
0
    if (!success) {
84
0
        locator.SetNull();
85
0
    }
86
87
0
    return locator;
88
0
}
89
90
void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator)
91
0
{
92
0
    batch.Write(DB_BEST_BLOCK, locator);
93
0
}
94
95
BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name)
96
0
    : m_chain{std::move(chain)}, m_name{std::move(name)} {}
97
98
BaseIndex::~BaseIndex()
99
0
{
100
0
    Interrupt();
101
0
    Stop();
102
0
}
103
104
bool BaseIndex::Init()
105
0
{
106
0
    AssertLockNotHeld(cs_main);
Line
Count
Source
147
0
#define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs)
107
108
    // May need reset if index is being restarted.
109
0
    m_interrupt.reset();
110
111
    // m_chainstate member gives indexing code access to node internals. It is
112
    // removed in followup https://github.com/bitcoin/bitcoin/pull/24230
113
0
    m_chainstate = WITH_LOCK(::cs_main,
Line
Count
Source
297
0
#define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }())
114
0
                             return &m_chain->context()->chainman->ValidatedChainstate());
115
    // Register to validation interface before setting the 'm_synced' flag, so that
116
    // callbacks are not missed once m_synced is true.
117
0
    m_chain->context()->validation_signals->RegisterValidationInterface(this);
118
119
0
    const auto locator{GetDB().ReadBestBlock()};
120
121
0
    LOCK(cs_main);
Line
Count
Source
266
0
#define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__)
Line
Count
Source
11
0
#define UNIQUE_NAME(name) PASTE2(name, __COUNTER__)
Line
Count
Source
9
0
#define PASTE2(x, y) PASTE(x, y)
Line
Count
Source
8
0
#define PASTE(x, y) x ## y
122
0
    CChain& index_chain = m_chainstate->m_chain;
123
124
0
    if (locator.IsNull()) {
125
0
        SetBestBlockIndex(nullptr);
126
0
    } else {
127
        // Setting the best block to the locator's top block. If it is not part of the
128
        // best chain, we will rewind to the fork point during index sync
129
0
        const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))};
130
0
        if (!locator_index) {
131
0
            return InitError(Untranslated(strprintf("best block of %s not found. Please rebuild the index.", GetName())));
Line
Count
Source
1172
0
#define strprintf tfm::format
132
0
        }
133
0
        SetBestBlockIndex(locator_index);
134
0
    }
135
136
    // Child init
137
0
    const CBlockIndex* start_block = m_best_block_index.load();
138
0
    if (!CustomInit(start_block ? std::make_optional(interfaces::BlockRef{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
139
0
        return false;
140
0
    }
141
142
    // Note: this will latch to true immediately if the user starts up with an empty
143
    // datadir and an index enabled. If this is the case, indexation will happen solely
144
    // via `BlockConnected` signals until, possibly, the next restart.
145
0
    m_synced = start_block == index_chain.Tip();
146
0
    m_init = true;
147
0
    return true;
148
0
}
149
150
static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
151
0
{
152
0
    AssertLockHeld(cs_main);
Line
Count
Source
142
0
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
153
154
0
    if (!pindex_prev) {
155
0
        return chain.Genesis();
156
0
    }
157
158
0
    const CBlockIndex* pindex = chain.Next(pindex_prev);
159
0
    if (pindex) {
160
0
        return pindex;
161
0
    }
162
163
    // Since block is not in the chain, return the next block in the chain AFTER the last common ancestor.
164
    // Caller will be responsible for rewinding back to the common ancestor.
165
0
    return chain.Next(chain.FindFork(pindex_prev));
166
0
}
167
168
bool BaseIndex::ProcessBlock(const CBlockIndex* pindex, const CBlock* block_data)
169
0
{
170
0
    interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block_data);
171
172
0
    CBlock block;
173
0
    if (!block_data) { // disk lookup if block data wasn't provided
174
0
        if (!m_chainstate->m_blockman.ReadBlock(block, *pindex)) {
175
0
            FatalErrorf("Failed to read block %s from disk",
176
0
                        pindex->GetBlockHash().ToString());
177
0
            return false;
178
0
        }
179
0
        block_info.data = &block;
180
0
    }
181
182
0
    CBlockUndo block_undo;
183
0
    if (CustomOptions().connect_undo_data) {
184
0
        if (pindex->nHeight > 0 && !m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) {
185
0
            FatalErrorf("Failed to read undo block data %s from disk",
186
0
                        pindex->GetBlockHash().ToString());
187
0
            return false;
188
0
        }
189
0
        block_info.undo_data = &block_undo;
190
0
    }
191
192
0
    if (!CustomAppend(block_info)) {
193
0
        FatalErrorf("Failed to write block %s to index database",
194
0
                    pindex->GetBlockHash().ToString());
195
0
        return false;
196
0
    }
197
198
0
    return true;
199
0
}
200
201
void BaseIndex::Sync()
202
0
{
203
0
    const CBlockIndex* pindex = m_best_block_index.load();
204
0
    if (!m_synced) {
205
0
        auto last_log_time{NodeClock::now()};
206
0
        auto last_locator_write_time{last_log_time};
207
0
        while (true) {
208
0
            if (m_interrupt) {
209
0
                LogInfo("%s: m_interrupt set; exiting ThreadSync", GetName());
Line
Count
Source
95
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
89
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
210
211
0
                SetBestBlockIndex(pindex);
212
                // No need to handle errors in Commit. If it fails, the error will already be
213
                // logged. The best way to recover is to continue, as index cannot be corrupted by
214
                // a missed commit to disk for an advanced index state.
215
0
                Commit();
216
0
                return;
217
0
            }
218
219
0
            const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain));
Line
Count
Source
297
0
#define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }())
220
            // If pindex_next is null, it means pindex is the chain tip, so
221
            // commit data indexed so far.
222
0
            if (!pindex_next) {
223
0
                SetBestBlockIndex(pindex);
224
                // No need to handle errors in Commit. See rationale above.
225
0
                Commit();
226
227
                // If pindex is still the chain tip after committing, exit the
228
                // sync loop. It is important for cs_main to be locked while
229
                // setting m_synced = true, otherwise a new block could be
230
                // attached while m_synced is still false, and it would not be
231
                // indexed.
232
0
                LOCK(::cs_main);
Line
Count
Source
266
0
#define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__)
Line
Count
Source
11
0
#define UNIQUE_NAME(name) PASTE2(name, __COUNTER__)
Line
Count
Source
9
0
#define PASTE2(x, y) PASTE(x, y)
Line
Count
Source
8
0
#define PASTE(x, y) x ## y
233
0
                pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
234
0
                if (!pindex_next) {
235
0
                    m_synced = true;
236
0
                    break;
237
0
                }
238
0
            }
239
0
            if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
240
0
                FatalErrorf("Failed to rewind %s to a previous chain tip", GetName());
241
0
                return;
242
0
            }
243
0
            pindex = pindex_next;
244
245
246
0
            if (!ProcessBlock(pindex)) return; // error logged internally
247
248
0
            auto current_time{NodeClock::now()};
249
0
            if (current_time - last_log_time >= SYNC_LOG_INTERVAL) {
250
0
                LogInfo("Syncing %s with block chain from height %d", GetName(), pindex->nHeight);
Line
Count
Source
95
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
89
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
251
0
                last_log_time = current_time;
252
0
            }
253
254
0
            if (current_time - last_locator_write_time >= SYNC_LOCATOR_WRITE_INTERVAL) {
255
0
                SetBestBlockIndex(pindex);
256
0
                last_locator_write_time = current_time;
257
                // No need to handle errors in Commit. See rationale above.
258
0
                Commit();
259
0
            }
260
0
        }
261
0
    }
262
263
0
    if (pindex) {
264
0
        LogInfo("%s is enabled at height %d", GetName(), pindex->nHeight);
Line
Count
Source
95
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
89
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
265
0
    } else {
266
0
        LogInfo("%s is enabled", GetName());
Line
Count
Source
95
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
89
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
267
0
    }
268
0
}
269
270
bool BaseIndex::Commit()
271
0
{
272
    // Don't commit anything if we haven't indexed any block yet
273
    // (this could happen if init is interrupted).
274
0
    bool ok = m_best_block_index != nullptr;
275
0
    if (ok) {
276
0
        CDBBatch batch(GetDB());
277
0
        ok = CustomCommit(batch);
278
0
        if (ok) {
279
0
            GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash()));
280
0
            GetDB().WriteBatch(batch);
281
0
        }
282
0
    }
283
0
    if (!ok) {
284
0
        LogError("Failed to commit latest %s state", GetName());
Line
Count
Source
97
0
#define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
89
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
285
0
        return false;
286
0
    }
287
0
    return true;
288
0
}
289
290
bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
291
0
{
292
0
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);
293
294
0
    CBlock block;
295
0
    CBlockUndo block_undo;
296
297
0
    for (const CBlockIndex* iter_tip = current_tip; iter_tip != new_tip; iter_tip = iter_tip->pprev) {
298
0
        interfaces::BlockInfo block_info = kernel::MakeBlockInfo(iter_tip);
299
0
        if (CustomOptions().disconnect_data) {
300
0
            if (!m_chainstate->m_blockman.ReadBlock(block, *iter_tip)) {
301
0
                LogError("Failed to read block %s from disk",
Line
Count
Source
97
0
#define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
89
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
302
0
                         iter_tip->GetBlockHash().ToString());
303
0
                return false;
304
0
            }
305
0
            block_info.data = &block;
306
0
        }
307
0
        if (CustomOptions().disconnect_undo_data && iter_tip->nHeight > 0) {
308
0
            if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *iter_tip)) {
309
0
                return false;
310
0
            }
311
0
            block_info.undo_data = &block_undo;
312
0
        }
313
0
        if (!CustomRemove(block_info)) {
314
0
            return false;
315
0
        }
316
0
    }
317
318
    // Don't commit here - the committed index state must never be ahead of the
319
    // flushed chainstate, otherwise unclean restarts would lead to index corruption.
320
    // Pruning has a minimum of 288 blocks-to-keep and getting the index
321
    // out of sync may be possible but a user's fault.
322
    // In case we reorg beyond the pruned depth, ReadBlock would
323
    // throw and lead to a graceful shutdown
324
0
    SetBestBlockIndex(new_tip);
325
0
    return true;
326
0
}
327
328
void BaseIndex::BlockConnected(const ChainstateRole& role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
329
0
{
330
    // Ignore events from not fully validated chains to avoid out-of-order indexing.
331
    //
332
    // TODO at some point we could parameterize whether a particular index can be
333
    // built out of order, but for now just do the conservative simple thing.
334
0
    if (!role.validated) {
335
0
        return;
336
0
    }
337
338
    // Ignore BlockConnected signals until we have fully indexed the chain.
339
0
    if (!m_synced) {
340
0
        return;
341
0
    }
342
343
0
    const CBlockIndex* best_block_index = m_best_block_index.load();
344
0
    if (!best_block_index) {
345
0
        if (pindex->nHeight != 0) {
346
0
            FatalErrorf("First block connected is not the genesis block (height=%d)",
347
0
                       pindex->nHeight);
348
0
            return;
349
0
        }
350
0
    } else {
351
        // Ensure block connects to an ancestor of the current best block. This should be the case
352
        // most of the time, but may not be immediately after the sync thread catches up and sets
353
        // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
354
        // in the ValidationInterface queue backlog even after the sync thread has caught up to the
355
        // new chain tip. In this unlikely event, log a warning and let the queue clear.
356
0
        if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
357
0
            LogWarning("Block %s does not connect to an ancestor of "
Line
Count
Source
96
0
#define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
89
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
358
0
                      "known best chain (tip=%s); not updating index",
359
0
                      pindex->GetBlockHash().ToString(),
360
0
                      best_block_index->GetBlockHash().ToString());
361
0
            return;
362
0
        }
363
0
        if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) {
364
0
            FatalErrorf("Failed to rewind %s to a previous chain tip",
365
0
                       GetName());
366
0
            return;
367
0
        }
368
0
    }
369
370
    // Dispatch block to child class; errors are logged internally and abort the node.
371
0
    if (ProcessBlock(pindex, block.get())) {
372
        // Setting the best block index is intentionally the last step of this
373
        // function, so BlockUntilSyncedToCurrentChain callers waiting for the
374
        // best block index to be updated can rely on the block being fully
375
        // processed, and the index object being safe to delete.
376
0
        SetBestBlockIndex(pindex);
377
0
    }
378
0
}
379
380
void BaseIndex::ChainStateFlushed(const ChainstateRole& role, const CBlockLocator& locator)
381
0
{
382
    // Ignore events from not fully validated chains to avoid out-of-order indexing.
383
0
    if (!role.validated) {
384
0
        return;
385
0
    }
386
387
0
    if (!m_synced) {
388
0
        return;
389
0
    }
390
391
0
    const uint256& locator_tip_hash = locator.vHave.front();
392
0
    const CBlockIndex* locator_tip_index;
393
0
    {
394
0
        LOCK(cs_main);
Line
Count
Source
266
0
#define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__)
Line
Count
Source
11
0
#define UNIQUE_NAME(name) PASTE2(name, __COUNTER__)
Line
Count
Source
9
0
#define PASTE2(x, y) PASTE(x, y)
Line
Count
Source
8
0
#define PASTE(x, y) x ## y
395
0
        locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
396
0
    }
397
398
0
    if (!locator_tip_index) {
399
0
        FatalErrorf("First block (hash=%s) in locator was not found",
400
0
                   locator_tip_hash.ToString());
401
0
        return;
402
0
    }
403
404
    // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
405
    // immediately after the sync thread catches up and sets m_synced. Consider the case where
406
    // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
407
    // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
408
    // event, log a warning and let the queue clear.
409
0
    const CBlockIndex* best_block_index = m_best_block_index.load();
410
0
    if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
411
0
        LogWarning("Locator contains block (hash=%s) not on known best "
Line
Count
Source
96
0
#define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
89
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
412
0
                  "chain (tip=%s); not writing index locator",
413
0
                  locator_tip_hash.ToString(),
414
0
                  best_block_index->GetBlockHash().ToString());
415
0
        return;
416
0
    }
417
418
    // No need to handle errors in Commit. If it fails, the error will already be logged. The
419
    // best way to recover is to continue, as index cannot be corrupted by a missed commit to disk
420
    // for an advanced index state.
421
0
    Commit();
422
0
}
423
424
bool BaseIndex::BlockUntilSyncedToCurrentChain() const
425
0
{
426
0
    AssertLockNotHeld(cs_main);
Line
Count
Source
147
0
#define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs)
427
428
0
    if (!m_synced) {
429
0
        return false;
430
0
    }
431
432
0
    {
433
        // Skip the queue-draining stuff if we know we're caught up with
434
        // m_chain.Tip().
435
0
        LOCK(cs_main);
Line
Count
Source
266
0
#define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__)
Line
Count
Source
11
0
#define UNIQUE_NAME(name) PASTE2(name, __COUNTER__)
Line
Count
Source
9
0
#define PASTE2(x, y) PASTE(x, y)
Line
Count
Source
8
0
#define PASTE(x, y) x ## y
436
0
        const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip();
437
0
        const CBlockIndex* best_block_index = m_best_block_index.load();
438
0
        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
439
0
            return true;
440
0
        }
441
0
    }
442
443
0
    LogInfo("%s is catching up on block notifications", GetName());
Line
Count
Source
95
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
89
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
444
0
    m_chain->context()->validation_signals->SyncWithValidationInterfaceQueue();
445
0
    return true;
446
0
}
447
448
void BaseIndex::Interrupt()
449
0
{
450
0
    m_interrupt();
451
0
}
452
453
bool BaseIndex::StartBackgroundSync()
454
0
{
455
0
    if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index");
456
457
0
    m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); });
458
0
    return true;
459
0
}
460
461
void BaseIndex::Stop()
462
0
{
463
0
    if (m_chain->context()->validation_signals) {
464
0
        m_chain->context()->validation_signals->UnregisterValidationInterface(this);
465
0
    }
466
467
0
    if (m_thread_sync.joinable()) {
468
0
        m_thread_sync.join();
469
0
    }
470
0
}
471
472
IndexSummary BaseIndex::GetSummary() const
473
0
{
474
0
    IndexSummary summary{};
475
0
    summary.name = GetName();
476
0
    summary.synced = m_synced;
477
0
    if (const auto& pindex = m_best_block_index.load()) {
478
0
        summary.best_block_height = pindex->nHeight;
479
0
        summary.best_block_hash = pindex->GetBlockHash();
480
0
    } else {
481
0
        summary.best_block_height = 0;
482
0
        summary.best_block_hash = m_chain->getBlockHash(0);
483
0
    }
484
0
    return summary;
485
0
}
486
487
void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
488
0
{
489
0
    assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());
490
491
0
    if (AllowPrune() && block) {
492
0
        node::PruneLockInfo prune_lock;
493
0
        prune_lock.height_first = block->nHeight;
494
0
        WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock));
Line
Count
Source
297
0
#define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }())
495
0
    }
496
497
    // Intentionally set m_best_block_index as the last step in this function,
498
    // after updating prune locks above, and after making any other references
499
    // to *this, so the BlockUntilSyncedToCurrentChain function (which checks
500
    // m_best_block_index as an optimization) can be used to wait for the last
501
    // BlockConnected notification and safely assume that prune locks are
502
    // updated and that the index object is safe to delete.
503
0
    m_best_block_index = block;
504
0
}