fix: incremental spark data cache
parent 2028505367
commit 4439ad70d2
10 changed files with 200 additions and 330 deletions
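The gist of the fix: instead of re-downloading the whole Spark anonymity set, the coordinator now compares the server's set meta against what is already cached and fetches only the missing coins in fixed-size sectors. A rough sketch of that flow, assuming the client.getSparkAnonymitySetMeta / client.getSparkAnonymitySetBySector calls and the toHexReversedFromBase64 extension seen in the diff below; client and cachedSize stand in for the wallet's ElectrumX client and the size of the previously cached set:

import 'dart:math';

// Sketch only: pull just the coins added since the last cached fetch.
// sectorSize mirrors the constant introduced by this commit.
Future<List<dynamic>> fetchNewCoins(int groupId, int cachedSize) async {
  const sectorSize = 12000;

  final meta = await client.getSparkAnonymitySetMeta(coinGroupId: groupId);
  final toFetch = meta.size - cachedSize; // only the coins we do not have yet

  final coins = <dynamic>[];
  for (int start = 0; start < toFetch; start += sectorSize) {
    coins.addAll(
      await client.getSparkAnonymitySetBySector(
        coinGroupId: groupId,
        latestBlock: meta.blockHash.toHexReversedFromBase64,
        startIndex: start,
        endIndex: min(start + sectorSize, toFetch),
      ),
    );
  }
  return coins;
}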
@@ -31,7 +31,7 @@ void _debugLog(Object? object) {
}

abstract class _FiroCache {
static const int _setCacheVersion = 1;
static const int _setCacheVersion = 2;
static const int _tagsCacheVersion = 2;

static final networks = [
@@ -43,17 +43,12 @@ abstract class _FiroCache {
network == CryptoCurrencyNetwork.main
? "spark_set_v$_setCacheVersion.sqlite3"
: "spark_set_v${_setCacheVersion}_${network.name}.sqlite3";
static String sparkSetMetaCacheFileName(CryptoCurrencyNetwork network) =>
network == CryptoCurrencyNetwork.main
? "spark_set_meta_v$_setCacheVersion.sqlite3"
: "spark_set_meta_v${_setCacheVersion}_${network.name}.sqlite3";
static String sparkUsedTagsCacheFileName(CryptoCurrencyNetwork network) =>
network == CryptoCurrencyNetwork.main
? "spark_tags_v$_tagsCacheVersion.sqlite3"
: "spark_tags_v${_tagsCacheVersion}_${network.name}.sqlite3";

static final Map<CryptoCurrencyNetwork, Database> _setCacheDB = {};
static final Map<CryptoCurrencyNetwork, Database> _setMetaCacheDB = {};
static final Map<CryptoCurrencyNetwork, Database> _usedTagsCacheDB = {};
static Database setCacheDB(CryptoCurrencyNetwork network) {
if (_setCacheDB[network] == null) {
@@ -64,15 +59,6 @@ abstract class _FiroCache {
return _setCacheDB[network]!;
}

static Database setMetaCacheDB(CryptoCurrencyNetwork network) {
if (_setMetaCacheDB[network] == null) {
throw Exception(
"FiroCache.init() must be called before accessing FiroCache.db!",
);
}
return _setMetaCacheDB[network]!;
}

static Database usedTagsCacheDB(CryptoCurrencyNetwork network) {
if (_usedTagsCacheDB[network] == null) {
throw Exception(
@@ -93,18 +79,12 @@ abstract class _FiroCache {
final sparkSetCacheFile =
File("${sqliteDir.path}/${sparkSetCacheFileName(network)}");

final sparkSetMetaCacheFile =
File("${sqliteDir.path}/${sparkSetMetaCacheFileName(network)}");

final sparkUsedTagsCacheFile =
File("${sqliteDir.path}/${sparkUsedTagsCacheFileName(network)}");

if (!(await sparkSetCacheFile.exists())) {
await _createSparkSetCacheDb(sparkSetCacheFile.path);
}
if (!(await sparkSetMetaCacheFile.exists())) {
await _createSparkSetMetaCacheDb(sparkSetMetaCacheFile.path);
}
if (!(await sparkUsedTagsCacheFile.exists())) {
await _createSparkUsedTagsCacheDb(sparkUsedTagsCacheFile.path);
}
@@ -113,10 +93,6 @@ abstract class _FiroCache {
sparkSetCacheFile.path,
mode: OpenMode.readWrite,
);
_setMetaCacheDB[network] = sqlite3.open(
sparkSetMetaCacheFile.path,
mode: OpenMode.readWrite,
);
_usedTagsCacheDB[network] = sqlite3.open(
sparkUsedTagsCacheFile.path,
mode: OpenMode.readWrite,
@@ -134,12 +110,6 @@ abstract class _FiroCache {
VACUUM;
""",
);
setMetaCacheDB(network).execute(
"""
DELETE FROM PreviousMetaFetchResult;
VACUUM;
""",
);
usedTagsCacheDB(network).execute(
"""
DELETE FROM SparkUsedCoinTags;
@@ -165,7 +135,7 @@ abstract class _FiroCache {
blockHash TEXT NOT NULL,
setHash TEXT NOT NULL,
groupId INTEGER NOT NULL,
timestampUTC INTEGER NOT NULL,
size INTEGER NOT NULL,
UNIQUE (blockHash, setHash, groupId)
);

@@ -174,7 +144,8 @@ abstract class _FiroCache {
serialized TEXT NOT NULL,
txHash TEXT NOT NULL,
context TEXT NOT NULL,
UNIQUE(serialized, txHash, context)
groupId INTEGER NOT NULL,
UNIQUE(serialized, txHash, context, groupId)
);

CREATE TABLE SparkSetCoins (
@@ -190,27 +161,6 @@ abstract class _FiroCache {
db.dispose();
}

static Future<void> _createSparkSetMetaCacheDb(String file) async {
final db = sqlite3.open(
file,
mode: OpenMode.readWriteCreate,
);

db.execute(
"""
CREATE TABLE PreviousMetaFetchResult (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
coinGroupId INTEGER NOT NULL UNIQUE,
blockHash TEXT NOT NULL,
setHash TEXT NOT NULL,
size INTEGER NOT NULL
);
""",
);

db.dispose();
}

static Future<void> _createSparkUsedTagsCacheDb(String file) async {
final db = sqlite3.open(
file,
@@ -32,9 +32,6 @@ abstract class FiroCacheCoordinator {
final setCacheFile = File(
"${dir.path}/${_FiroCache.sparkSetCacheFileName(network)}",
);
final setMetaCacheFile = File(
"${dir.path}/${_FiroCache.sparkSetMetaCacheFileName(network)}",
);
final usedTagsCacheFile = File(
"${dir.path}/${_FiroCache.sparkUsedTagsCacheFileName(network)}",
);
@@ -44,8 +41,6 @@ abstract class FiroCacheCoordinator {
final tagsSize = (await usedTagsCacheFile.exists())
? await usedTagsCacheFile.length()
: 0;
final setMetaSize =
(await setMetaCacheFile.exists()) ? await setMetaCacheFile.length() : 0;

Logging.instance.log(
"Spark cache used tags size: $tagsSize",
@@ -55,12 +50,8 @@ abstract class FiroCacheCoordinator {
"Spark cache anon set size: $setSize",
level: LogLevel.Debug,
);
Logging.instance.log(
"Spark cache set meta size: $setMetaSize",
level: LogLevel.Debug,
);

final int bytes = tagsSize + setSize + setMetaSize;
final int bytes = tagsSize + setSize;

if (bytes < 1024) {
return '$bytes B';
@@ -104,93 +95,70 @@ abstract class FiroCacheCoordinator {
void Function(int countFetched, int totalCount)? progressUpdated,
) async {
await _setLocks[network]!.protect(() async {
Map<String, dynamic> json;
SparkAnonymitySetMeta? meta;
const sectorSize = 12000; // TODO adjust this?
final prevMeta = await FiroCacheCoordinator.getLatestSetInfoForGroupId(
groupId,
network,
);

if (progressUpdated == null) {
// Legacy
final blockhashResult =
await FiroCacheCoordinator.getLatestSetInfoForGroupId(
groupId,
network,
final prevSize = prevMeta?.size ?? 0;

final meta = await client.getSparkAnonymitySetMeta(
coinGroupId: groupId,
);

progressUpdated?.call(prevSize, meta.size);

if (prevMeta?.blockHash == meta.blockHash) {
Logging.instance.log(
"prevMeta?.blockHash == meta.blockHash",
level: LogLevel.Debug,
);
final blockHash = blockhashResult?.blockHash ?? "";

json = await client.getSparkAnonymitySet(
coinGroupId: groupId.toString(),
startBlockHash: blockHash.toHexReversedFromBase64,
);
} else {
const sectorSize = 2000; // TODO adjust this?
final prevMetaSize =
await FiroCacheCoordinator.getSparkMetaSetSizeForGroupId(
groupId,
network,
);
final prevSize = prevMetaSize ?? 0;

meta = await client.getSparkAnonymitySetMeta(
coinGroupId: groupId,
);

progressUpdated.call(prevSize, meta.size);

/// Returns blockHash (last block hash),
/// setHash (hash of current set)
/// and coins (the list of pairs serialized coin and tx hash)

final fullSectorCount = (meta.size - prevSize) ~/ sectorSize;
final remainder = (meta.size - prevSize) % sectorSize;

final List<dynamic> coins = [];

for (int i = 0; i < fullSectorCount; i++) {
final start = (i * sectorSize) + prevSize;
final data = await client.getSparkAnonymitySetBySector(
coinGroupId: groupId,
latestBlock: meta.blockHash.toHexReversedFromBase64,
startIndex: start,
endIndex: start + sectorSize,
);
progressUpdated.call(start + sectorSize, meta.size);

coins.addAll(data);
}

if (remainder > 0) {
final data = await client.getSparkAnonymitySetBySector(
coinGroupId: groupId,
latestBlock: meta.blockHash.toHexReversedFromBase64,
startIndex: meta.size - remainder,
endIndex: meta.size,
);
progressUpdated.call(meta.size, meta.size);

coins.addAll(data);
}

json = {
"blockHash": meta.blockHash,
"setHash": meta.setHash,
"coins": coins,
};
return;
}

final numberOfCoinsToFetch = meta.size - prevSize;

final fullSectorCount = numberOfCoinsToFetch ~/ sectorSize;
final remainder = numberOfCoinsToFetch % sectorSize;

final List<dynamic> coins = [];

for (int i = 0; i < fullSectorCount; i++) {
final start = (i * sectorSize);
final data = await client.getSparkAnonymitySetBySector(
coinGroupId: groupId,
latestBlock: meta.blockHash.toHexReversedFromBase64,
startIndex: start,
endIndex: start + sectorSize,
);
progressUpdated?.call(start + sectorSize, numberOfCoinsToFetch);

coins.addAll(data);
}

if (remainder > 0) {
final data = await client.getSparkAnonymitySetBySector(
coinGroupId: groupId,
latestBlock: meta.blockHash.toHexReversedFromBase64,
startIndex: numberOfCoinsToFetch - remainder,
endIndex: numberOfCoinsToFetch,
);
progressUpdated?.call(numberOfCoinsToFetch, numberOfCoinsToFetch);

coins.addAll(data);
}

final result = coins
.map((e) => RawSparkCoin.fromRPCResponse(e as List, groupId))
.toList();

await _workers[network]!.runTask(
FCTask(
func: FCFuncName._updateSparkAnonSetCoinsWith,
data: (groupId, json),
data: (meta, result),
),
);

if (meta != null) {
await _workers[network]!.runTask(
FCTask(
func: FCFuncName._updateSparkAnonSetMetaWith,
data: meta,
),
);
}
});
}

@@ -265,28 +233,29 @@ abstract class FiroCacheCoordinator {
);
}

static Future<
List<
({
String serialized,
String txHash,
String context,
})>> getSetCoinsForGroupId(
static Future<List<RawSparkCoin>> getSetCoinsForGroupId(
int groupId, {
int? newerThanTimeStamp,
String? afterBlockHash,
required CryptoCurrencyNetwork network,
}) async {
final resultSet = await _Reader._getSetCoinsForGroupId(
groupId,
db: _FiroCache.setCacheDB(network),
newerThanTimeStamp: newerThanTimeStamp,
);
final resultSet = afterBlockHash == null
? await _Reader._getSetCoinsForGroupId(
groupId,
db: _FiroCache.setCacheDB(network),
)
: await _Reader._getSetCoinsForGroupIdAndBlockHash(
groupId,
afterBlockHash,
db: _FiroCache.setCacheDB(network),
);

return resultSet
.map(
(row) => (
(row) => RawSparkCoin(
serialized: row["serialized"] as String,
txHash: row["txHash"] as String,
context: row["context"] as String,
groupId: groupId,
),
)
.toList()
@@ -294,12 +263,7 @@ abstract class FiroCacheCoordinator {
.toList();
}

static Future<
({
String blockHash,
String setHash,
int timestampUTC,
})?> getLatestSetInfoForGroupId(
static Future<SparkAnonymitySetMeta?> getLatestSetInfoForGroupId(
int groupId,
CryptoCurrencyNetwork network,
) async {
@@ -312,10 +276,11 @@ abstract class FiroCacheCoordinator {
return null;
}

return (
return SparkAnonymitySetMeta(
coinGroupId: groupId,
blockHash: result.first["blockHash"] as String,
setHash: result.first["setHash"] as String,
timestampUTC: result.first["timestampUTC"] as int,
size: result.first["size"] as int,
);
}

@@ -328,19 +293,4 @@ abstract class FiroCacheCoordinator {
db: _FiroCache.setCacheDB(network),
);
}

static Future<int?> getSparkMetaSetSizeForGroupId(
int groupId,
CryptoCurrencyNetwork network,
) async {
final result = await _Reader._getSizeForGroupId(
groupId,
db: _FiroCache.setMetaCacheDB(network),
);
if (result.isEmpty) {
return null;
}

return result.first["size"] as int;
}
}
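On the read side, getSetCoinsForGroupId now returns RawSparkCoins and accepts an optional afterBlockHash, so a wallet can ask the cache only for coins stored after the block hash it last processed (passing null still returns the full cached set). A small usage sketch; lastCheckedHash is assumed to come from the wallet's saved per-group state, as in the SparkInterface change further down:

// Sketch: incremental read of the cached anonymity set for group 1 on mainnet.
final newCoins = await FiroCacheCoordinator.getSetCoinsForGroupId(
  1,
  afterBlockHash: lastCheckedHash, // null on first run => whole cached set
  network: CryptoCurrencyNetwork.main,
);
for (final coin in newCoins) {
  // coin.serialized, coin.txHash, coin.context, coin.groupId
}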
@@ -8,21 +8,15 @@ abstract class _Reader {
static Future<ResultSet> _getSetCoinsForGroupId(
int groupId, {
required Database db,
int? newerThanTimeStamp,
}) async {
String query = """
SELECT sc.serialized, sc.txHash, sc.context
final query = """
SELECT sc.serialized, sc.txHash, sc.context, sc.groupId
FROM SparkSet AS ss
JOIN SparkSetCoins AS ssc ON ss.id = ssc.setId
JOIN SparkCoin AS sc ON ssc.coinId = sc.id
WHERE ss.groupId = $groupId
WHERE ss.groupId = $groupId;
""";

if (newerThanTimeStamp != null) {
query += " AND ss.timestampUTC"
" > $newerThanTimeStamp";
}

return db.select("$query;");
}

@@ -31,16 +25,45 @@ abstract class _Reader {
required Database db,
}) async {
final query = """
SELECT ss.blockHash, ss.setHash, ss.timestampUTC
SELECT ss.blockHash, ss.setHash, ss.size
FROM SparkSet ss
WHERE ss.groupId = $groupId
ORDER BY ss.timestampUTC DESC
ORDER BY ss.size DESC
LIMIT 1;
""";

return db.select("$query;");
}

static Future<ResultSet> _getSetCoinsForGroupIdAndBlockHash(
int groupId,
String blockHash, {
required Database db,
}) async {
const query = """
WITH TargetBlock AS (
SELECT id
FROM SparkSet
WHERE blockHash = ?
),
TargetSets AS (
SELECT id AS setId
FROM SparkSet
WHERE groupId = ? AND id > (SELECT id FROM TargetBlock)
)
SELECT
SparkCoin.serialized,
SparkCoin.txHash,
SparkCoin.context,
SparkCoin.groupId
FROM SparkSetCoins
JOIN SparkCoin ON SparkSetCoins.coinId = SparkCoin.id
WHERE SparkSetCoins.setId IN (SELECT setId FROM TargetSets);
""";

return db.select("$query;", [blockHash, groupId]);
}

static Future<bool> _checkSetInfoForGroupIdExists(
int groupId, {
required Database db,
@@ -56,21 +79,6 @@ abstract class _Reader {
return db.select("$query;").first["setExists"] == 1;
}

// ===========================================================================
// =============== Spark anonymity set meta queries ==========================
static Future<ResultSet> _getSizeForGroupId(
int groupId, {
required Database db,
}) async {
final query = """
SELECT size
FROM PreviousMetaFetchResult
WHERE coinGroupId = $groupId;
""";

return db.select("$query;");
}

// ===========================================================================
// =============== Spark used coin tags queries ==============================
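The new _getSetCoinsForGroupIdAndBlockHash query above locates the cached set whose blockHash matches the caller's hash, then returns coins from any later set in the same group. Because the SQL uses positional placeholders, the parameter list must follow the order the question marks appear in: [blockHash, groupId]. A self-contained sketch of the same pattern with package:sqlite3 (tables and values are illustrative only):

import 'package:sqlite3/sqlite3.dart';

void main() {
  final db = sqlite3.openInMemory();
  db.execute(
    'CREATE TABLE SparkSet (id INTEGER PRIMARY KEY, groupId INTEGER, blockHash TEXT);',
  );
  db.execute(
    "INSERT INTO SparkSet (groupId, blockHash) VALUES (1, 'aa'), (1, 'bb'), (1, 'cc');",
  );

  // First ? binds blockHash, second ? binds groupId.
  final rows = db.select(
    '''
    WITH TargetBlock AS (SELECT id FROM SparkSet WHERE blockHash = ?)
    SELECT id, blockHash FROM SparkSet
    WHERE groupId = ? AND id > (SELECT id FROM TargetBlock);
    ''',
    ['aa', 1],
  );
  print(rows.map((r) => r['blockHash']).toList()); // [bb, cc]

  db.dispose();
}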
@@ -3,7 +3,6 @@ part of 'firo_cache.dart';
enum FCFuncName {
_updateSparkAnonSetCoinsWith,
_updateSparkUsedTagsWith,
_updateSparkAnonSetMetaWith,
}

class FCTask {
@@ -30,8 +29,6 @@ class _FiroCacheWorker {
final dir = await StackFileSystem.applicationFiroCacheSQLiteDirectory();
final setCacheFilePath =
"${dir.path}/${_FiroCache.sparkSetCacheFileName(network)}";
final setMetaCacheFilePath =
"${dir.path}/${_FiroCache.sparkSetMetaCacheFileName(network)}";
final usedTagsCacheFilePath =
"${dir.path}/${_FiroCache.sparkUsedTagsCacheFileName(network)}";

@@ -54,7 +51,6 @@ class _FiroCacheWorker {
(
initPort.sendPort,
setCacheFilePath,
setMetaCacheFilePath,
usedTagsCacheFilePath,
),
);
@@ -87,7 +83,6 @@ class _FiroCacheWorker {
ReceivePort receivePort,
SendPort sendPort,
Database setCacheDb,
Database setMetaCacheDb,
Database usedTagsCacheDb,
Mutex mutex,
) {
@@ -99,7 +94,8 @@ class _FiroCacheWorker {
final FCResult result;
switch (task.func) {
case FCFuncName._updateSparkAnonSetCoinsWith:
final data = task.data as (int, Map<String, dynamic>);
final data =
task.data as (SparkAnonymitySetMeta, List<RawSparkCoin>);
result = _updateSparkAnonSetCoinsWith(
setCacheDb,
data.$2,
@@ -113,13 +109,6 @@ class _FiroCacheWorker {
task.data as List<List<dynamic>>,
);
break;

case FCFuncName._updateSparkAnonSetMetaWith:
result = _updateSparkAnonSetMetaWith(
setMetaCacheDb,
task.data as SparkAnonymitySetMeta,
);
break;
}

if (result.success) {
@@ -134,7 +123,7 @@ class _FiroCacheWorker {
});
}

static void _startWorkerIsolate((SendPort, String, String, String) args) {
static void _startWorkerIsolate((SendPort, String, String) args) {
final receivePort = ReceivePort();
args.$1.send(receivePort.sendPort);
final mutex = Mutex();
@@ -142,19 +131,14 @@ class _FiroCacheWorker {
args.$2,
mode: OpenMode.readWrite,
);
final setMetaCacheDb = sqlite3.open(
args.$3,
mode: OpenMode.readWrite,
);
final usedTagsCacheDb = sqlite3.open(
args.$4,
args.$3,
mode: OpenMode.readWrite,
);
_handleCommandsToIsolate(
receivePort,
args.$1,
setCacheDb,
setMetaCacheDb,
usedTagsCacheDb,
mutex,
);
@@ -48,58 +48,17 @@ FCResult _updateSparkUsedTagsWith(
}
}

// ===========================================================================
// ================== write to spark anon set Meta cache ==========================
FCResult _updateSparkAnonSetMetaWith(
Database db,
SparkAnonymitySetMeta meta,
) {
db.execute("BEGIN;");
try {
db.execute(
"""
INSERT OR REPLACE INTO PreviousMetaFetchResult (coinGroupId, blockHash, setHash, size)
VALUES (?, ?, ?, ?);
""",
[meta.coinGroupId, meta.blockHash, meta.setHash, meta.size],
);

db.execute("COMMIT;");

return FCResult(success: true);
} catch (e) {
db.execute("ROLLBACK;");
return FCResult(success: false, error: e);
}
}

// ===========================================================================
// ================== write to spark anon set cache ==========================

/// update the sqlite cache
/// Expected json format:
/// {
/// "blockHash": "someBlockHash",
/// "setHash": "someSetHash",
/// "coins": [
/// ["serliazed1", "hash1", "context1"],
/// ["serliazed2", "hash2", "context2"],
/// ...
/// ["serliazed3", "hash3", "context3"],
/// ["serliazed4", "hash4", "context4"],
/// ],
/// }
///
/// returns true if successful, otherwise false
FCResult _updateSparkAnonSetCoinsWith(
Database db,
Map<String, dynamic> json,
int groupId,
final List<RawSparkCoin> coinsRaw,
SparkAnonymitySetMeta meta,
) {
final blockHash = json["blockHash"] as String;
final setHash = json["setHash"] as String;
final coinsRaw = json["coins"] as List;

if (coinsRaw.isEmpty) {
// no coins to actually insert
return FCResult(success: true);
@@ -112,9 +71,9 @@ FCResult _updateSparkAnonSetCoinsWith(
WHERE blockHash = ? AND setHash = ? AND groupId = ?;
""",
[
blockHash,
setHash,
groupId,
meta.blockHash,
meta.setHash,
meta.coinGroupId,
],
);

@@ -123,59 +82,28 @@ FCResult _updateSparkAnonSetCoinsWith(
return FCResult(success: true);
}

final coins = coinsRaw
.map(
(e) => [
e[0] as String,
e[1] as String,
e[2] as String,
],
)
.toList()
.reversed;

final timestamp = DateTime.now().toUtc().millisecondsSinceEpoch ~/ 1000;
final coins = coinsRaw.reversed;

db.execute("BEGIN;");
try {
db.execute(
"""
INSERT INTO SparkSet (blockHash, setHash, groupId, timestampUTC)
INSERT INTO SparkSet (blockHash, setHash, groupId, size)
VALUES (?, ?, ?, ?);
""",
[blockHash, setHash, groupId, timestamp],
[meta.blockHash, meta.setHash, meta.coinGroupId, meta.size],
);
final setId = db.lastInsertRowId;

for (final coin in coins) {
int coinId;
try {
// try to insert and get row id
db.execute(
"""
INSERT INTO SparkCoin (serialized, txHash, context)
VALUES (?, ?, ?);
db.execute(
"""
INSERT INTO SparkCoin (serialized, txHash, context, groupId)
VALUES (?, ?, ?, ?);
""",
coin,
);
coinId = db.lastInsertRowId;
} on SqliteException catch (e) {
// if there already is a matching coin in the db
// just grab its row id
if (e.extendedResultCode == 2067) {
final result = db.select(
"""
SELECT id
FROM SparkCoin
WHERE serialized = ? AND txHash = ? AND context = ?;
""",
coin,
);
coinId = result.first["id"] as int;
} else {
rethrow;
}
}
[coin.serialized, coin.txHash, coin.context, coin.groupId],
);
final coinId = db.lastInsertRowId;

// finally add the row id to the newly added set
db.execute(
@@ -399,7 +399,11 @@ class ElectrumXClient {
rethrow;
}
} catch (e) {
final errorMessage = e.toString();
Logging.instance.log("$host $e", level: LogLevel.Debug);
if (errorMessage.contains("JSON-RPC error")) {
currentFailoverIndex = _failovers.length;
}
if (currentFailoverIndex < _failovers.length - 1) {
currentFailoverIndex++;
return request(
@@ -45,3 +45,54 @@ class SparkAnonymitySetMeta {
"}";
}
}

class RawSparkCoin {
final String serialized;
final String txHash;
final String context;
final int groupId;

RawSparkCoin({
required this.serialized,
required this.txHash,
required this.context,
required this.groupId,
});

static RawSparkCoin fromRPCResponse(List<dynamic> data, int groupId) {
try {
if (data.length != 3) throw Exception();
return RawSparkCoin(
serialized: data[0] as String,
txHash: data[1] as String,
context: data[2] as String,
groupId: groupId,
);
} catch (_) {
throw Exception("Invalid coin data: $data");
}
}

@override
bool operator ==(Object other) {
if (identical(this, other)) return true;
if (other is! RawSparkCoin) return false;
return serialized == other.serialized &&
txHash == other.txHash &&
groupId == other.groupId &&
context == other.context;
}

@override
int get hashCode => Object.hash(serialized, txHash, context);

@override
String toString() {
return "SparkAnonymitySetMeta{"
"serialized: $serialized, "
"txHash: $txHash, "
"context: $context, "
"groupId: $groupId"
"}";
}
}
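RawSparkCoin.fromRPCResponse expects each raw entry to be a three-element list of serialized coin, tx hash, and context (the per-coin shape returned by the sector RPC used earlier in this diff); anything else throws. A tiny hedged example with made-up values:

// Sketch: turning one raw RPC entry into a RawSparkCoin for group 1.
final coin = RawSparkCoin.fromRPCResponse(
  <dynamic>["someSerializedCoin", "someTxHash", "someContext"],
  1,
);
assert(coin.txHash == "someTxHash");

// A malformed entry is rejected:
// RawSparkCoin.fromRPCResponse(["only", "two"], 1); // throws "Invalid coin data: ..."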
@@ -520,8 +520,8 @@ abstract class WalletInfoKeys {
static const String lelantusCoinIsarRescanRequired =
"lelantusCoinIsarRescanRequired";
static const String enableLelantusScanning = "enableLelantusScanningKey";
static const String firoSparkCacheSetTimestampCache =
"firoSparkCacheSetTimestampCacheKey";
static const String firoSparkCacheSetBlockHashCache =
"firoSparkCacheSetBlockHashCacheKey";
static const String enableOptInRbf = "enableOptInRbfKey";
static const String reuseAddress = "reuseAddressKey";
static const String isViewOnlyKey = "isViewOnlyKey";
@@ -671,7 +671,7 @@ class FiroWallet<T extends ElectrumXCurrencyInterface> extends Bip39HDWallet<T>
// reset last checked values
await info.updateOtherData(
newEntries: {
WalletInfoKeys.firoSparkCacheSetTimestampCache: <String, int>{},
WalletInfoKeys.firoSparkCacheSetBlockHashCache: <String, String>{},
},
isar: mainDB.isar,
);
@@ -886,11 +886,11 @@ mixin SparkInterface<T extends ElectrumXCurrencyInterface>
currentPercent = _triggerEventHelper(currentPercent, percentIncrement);
}

// Get cached timestamps per groupId. These timestamps are used to check
// Get cached block hashes per groupId. These hashes are used to check
// and try to id coins that were added to the spark anon set cache
// after that timestamp.
final groupIdTimestampUTCMap =
info.otherData[WalletInfoKeys.firoSparkCacheSetTimestampCache]
// after that block.
final groupIdBlockHashMap =
info.otherData[WalletInfoKeys.firoSparkCacheSetBlockHashCache]
as Map? ??
{};

@@ -898,8 +898,7 @@ mixin SparkInterface<T extends ElectrumXCurrencyInterface>
// processed by this wallet yet
final Map<int, List<List<String>>> rawCoinsBySetId = {};
for (int i = 1; i <= latestGroupId; i++) {
final lastCheckedTimeStampUTC =
groupIdTimestampUTCMap[i.toString()] as int? ?? 0;
final lastCheckedHash = groupIdBlockHashMap[i.toString()] as String?;
final info = await FiroCacheCoordinator.getLatestSetInfoForGroupId(
i,
cryptoCurrency.network,
@@ -907,7 +906,7 @@ mixin SparkInterface<T extends ElectrumXCurrencyInterface>
final anonymitySetResult =
await FiroCacheCoordinator.getSetCoinsForGroupId(
i,
newerThanTimeStamp: lastCheckedTimeStampUTC,
afterBlockHash: lastCheckedHash,
network: cryptoCurrency.network,
);
final coinsRaw = anonymitySetResult
@@ -924,11 +923,8 @@ mixin SparkInterface<T extends ElectrumXCurrencyInterface>
rawCoinsBySetId[i] = coinsRaw;
}

// update last checked timestamp data
groupIdTimestampUTCMap[i.toString()] = max(
lastCheckedTimeStampUTC,
info?.timestampUTC ?? lastCheckedTimeStampUTC,
);
// update last checked
groupIdBlockHashMap[i.toString()] = info?.blockHash;
}

if (percentIncrement != null) {
@@ -973,11 +969,10 @@ mixin SparkInterface<T extends ElectrumXCurrencyInterface>
});
}

// finally update the cached timestamps in the database
// finally update the cached block hashes in the database
await info.updateOtherData(
newEntries: {
WalletInfoKeys.firoSparkCacheSetTimestampCache:
groupIdTimestampUTCMap,
WalletInfoKeys.firoSparkCacheSetBlockHashCache: groupIdBlockHashMap,
},
isar: mainDB.isar,
);