// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of OpenEthereum.
// OpenEthereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// OpenEthereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed `HashDB` implementation.
use std::{io, sync::Arc};
use bytes::Bytes;
use ethcore_db::{DBTransaction, DBValue, KeyValueDB};
use ethereum_types::H256;
use hash_db::{AsHashDB, HashDB};
use keccak_hasher::KeccakHasher;
use std::collections::{BTreeMap, HashMap};
/// Expose the keys of a `HashDB` for debugging or tests (slow).
pub trait KeyedHashDB: HashDB<KeccakHasher, DBValue> {
/// Primarily used for tests; highly inefficient.
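///
/// A hedged debugging sketch (`db` is a hypothetical implementor; the `i32`
/// values are reference counts in the journalled implementations):
///
/// ```ignore
/// for (key, ref_count) in db.keys() {
///     println!("{:?}: rc = {}", key, ref_count);
/// }
/// ```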
fn keys(&self) -> HashMap<H256, i32>;
}
/// Upcast to `KeyedHashDB`
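///
/// A minimal sketch of a typical implementation: assuming `Self` already
/// implements `KeyedHashDB`, the upcast just returns `self` (the implementor
/// name here is illustrative):
///
/// ```ignore
/// impl AsKeyedHashDB for MyJournalDB {
///     fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self }
/// }
/// ```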
pub trait AsKeyedHashDB: AsHashDB<KeccakHasher, DBValue> {
/// Perform upcast to KeyedHashDB.
fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB;
}
/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually
/// exclusive actions.
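///
/// A hedged lifecycle sketch, mirroring the test-only `commit_batch` helper
/// below (`jdb`, `era`, `block_hash`, `ancient_hash` and the pruning depth
/// `HISTORY` are hypothetical bindings):
///
/// ```ignore
/// let mut batch = jdb.backing().transaction();
/// // Journal this block's pending insertions/deletions under its era and id.
/// jdb.journal_under(&mut batch, era, &block_hash)?;
/// // Once a block is old enough, mark its branch canonical so the states of
/// // competing branches can be pruned.
/// jdb.mark_canonical(&mut batch, era - HISTORY, &ancient_hash)?;
/// jdb.backing().write(batch)?;
/// jdb.flush();
/// ```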
pub trait JournalDB: KeyedHashDB {
/// Return a copy of ourselves, in a box.
fn boxed_clone(&self) -> Box<dyn JournalDB>;
/// Record the heap memory used by internal structures into `sizes`, keyed by name.
fn get_sizes(&self, sizes: &mut BTreeMap<String, usize>);
/// Returns the size of journalled state in memory.
/// This function has a considerable speed requirement --
/// it must be fast enough to be called several times per imported block.
fn journal_size(&self) -> usize {
0
}
/// Check if this database has any commits
fn is_empty(&self) -> bool;
/// Get the earliest era in the DB. Returns `None` if the DB contains no data yet.
fn earliest_era(&self) -> Option<u64> {
None
}
/// Get the latest era in the DB. Returns `None` if the DB contains no data yet.
fn latest_era(&self) -> Option<u64>;
/// Journal recent database operations as being associated with a given era and id.
// TODO: give the overlay to this function so journaldbs don't manage the overlays themselves.
fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32>;
/// Mark a given block as canonical, indicating that competing blocks' states may be pruned out.
fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256)
-> io::Result<u32>;
/// Commit all queued insert and delete operations without affecting any journalling -- this requires that all
/// insertions and deletions are indeed canonical; violating that assumption will likely leave the database invalid.
///
/// Any keys or values inserted or deleted must be completely independent of those affected
/// by any previous `commit` operations. Essentially, this means that `inject` can be used
/// either to restore a state to a fresh database, or to insert data which may only be journalled
/// from this point onwards.
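///
/// A hedged sketch, mirroring the test-only `inject_batch` helper below
/// (`jdb` is a hypothetical binding):
///
/// ```ignore
/// let mut batch = jdb.backing().transaction();
/// // Write the pending operations straight through, bypassing the journal.
/// let ops = jdb.inject(&mut batch)?;
/// jdb.backing().write(batch)?;
/// ```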
fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32>;
/// Whether this database is pruned.
fn is_pruned(&self) -> bool {
true
}
/// Get backing database.
fn backing(&self) -> &Arc<dyn KeyValueDB>;
/// Clear internal structures. This should be called after changes have been written
/// to the backing storage.
fn flush(&self) {}
/// Consolidate all the insertions and deletions in the given memory overlay.
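///
/// A hedged sketch (assuming `overlay` is a `MemoryDB` that accumulated
/// insertions and deletions elsewhere, and `jdb` is a hypothetical binding):
///
/// ```ignore
/// // Merge the staged writes into this journal's pending set so that a later
/// // `journal_under` call records them.
/// jdb.consolidate(overlay);
/// ```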
fn consolidate(&mut self, overlay: ::memory_db::MemoryDB<KeccakHasher, DBValue>);
/// Query state data associated with the given id.
fn state(&self, id: &H256) -> Option<Bytes>;
/// Commit all changes in a single batch
#[cfg(test)]
fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> io::Result<u32> {
let mut batch = self.backing().transaction();
let mut ops = self.journal_under(&mut batch, now, id)?;
if let Some((end_era, canon_id)) = end {
ops += self.mark_canonical(&mut batch, end_era, &canon_id)?;
}
let result = self.backing().write(batch).map(|_| ops).map_err(Into::into);
self.flush();
result
}
/// Inject all changes in a single batch.
#[cfg(test)]
fn inject_batch(&mut self) -> io::Result<u32> {
let mut batch = self.backing().transaction();
let res = self.inject(&mut batch)?;
self.backing().write(batch).map(|_| res).map_err(Into::into)
}
}