Include mnemonic dependency directly.

This commit is contained in:
tecnovert 2024-05-29 09:39:13 +02:00
parent d3e3c3c95b
commit ae1df0b556
24 changed files with 24894 additions and 12 deletions

View file

@@ -0,0 +1,3 @@
from .mnemonic import Mnemonic
__all__ = ["Mnemonic"]

View file

@@ -0,0 +1,298 @@
#
# Copyright (c) 2013 Pavol Rusnak
# Copyright (c) 2017 mruddy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import annotations
import hashlib
import hmac
import itertools
import os
import secrets
import typing as t
import unicodedata
PBKDF2_ROUNDS = 2048
class ConfigurationError(Exception):
pass
# Refactored code segments from <https://github.com/keis/base58>
def b58encode(v: bytes) -> str:
alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
p, acc = 1, 0
for c in reversed(v):
acc += p * c
p = p << 8
string = ""
while acc:
acc, idx = divmod(acc, 58)
string = alphabet[idx : idx + 1] + string
return string
class Mnemonic(object):
def __init__(self, language: str = "english", wordlist: list[str] | None = None):
self.radix = 2048
self.language = language
if wordlist is None:
d = os.path.join(os.path.dirname(__file__), f"wordlist/{language}.txt")
if os.path.exists(d) and os.path.isfile(d):
with open(d, "r", encoding="utf-8") as f:
wordlist = [w.strip() for w in f.readlines()]
else:
raise ConfigurationError("Language not detected")
if len(wordlist) != self.radix:
raise ConfigurationError(f"Wordlist must contain {self.radix} words.")
self.wordlist = wordlist
# Japanese must be joined by ideographic space
self.delimiter = "\u3000" if language == "japanese" else " "
@classmethod
def list_languages(cls) -> list[str]:
return [
f.split(".")[0]
for f in os.listdir(os.path.join(os.path.dirname(__file__), "wordlist"))
if f.endswith(".txt")
]
@staticmethod
def normalize_string(txt: t.AnyStr) -> str:
if isinstance(txt, bytes):
utxt = txt.decode("utf8")
elif isinstance(txt, str):
utxt = txt
else:
raise TypeError("String value expected")
return unicodedata.normalize("NFKD", utxt)
@classmethod
def detect_language(cls, code: str) -> str:
"""Scan the Mnemonic until the language becomes unambiguous, including as abbreviation prefixes.
Unfortunately, there are valid words that are ambiguous between languages, which are complete words
in one language and are prefixes in another:
english: abandon ... about
french: abandon ... aboutir
If prefixes remain ambiguous, require exactly one language where word(s) match exactly.
"""
code = cls.normalize_string(code)
possible = set(cls(lang) for lang in cls.list_languages())
words = set(code.split())
for word in words:
# possible languages have candidate(s) starting with the word/prefix
possible = set(
p for p in possible if any(c.startswith(word) for c in p.wordlist)
)
if not possible:
raise ConfigurationError(f"Language unrecognized for {word!r}")
if len(possible) == 1:
return possible.pop().language
# Multiple languages match: A prefix in many, but an exact match in one determines language.
complete = set()
for word in words:
exact = set(p for p in possible if word in p.wordlist)
if len(exact) == 1:
complete.update(exact)
if len(complete) == 1:
return complete.pop().language
raise ConfigurationError(
f"Language ambiguous between {', '.join(p.language for p in possible)}"
)
def generate(self, strength: int = 128) -> str:
"""
        Create a new mnemonic using a randomly generated number as entropy.
As defined in BIP39, the entropy must be a multiple of 32 bits, and its size must be between 128 and 256 bits.
Therefore the possible values for `strength` are 128, 160, 192, 224 and 256.
If not provided, the default entropy length will be set to 128 bits.
        The return value is a mnemonic string that encodes the generated entropy.
        :param strength: Number of entropy bits
:type strength: int
:return: A randomly generated mnemonic
:rtype: str
"""
if strength not in [128, 160, 192, 224, 256]:
raise ValueError(
"Invalid strength value. Allowed values are [128, 160, 192, 224, 256]."
)
return self.to_mnemonic(secrets.token_bytes(strength // 8))
# Adapted from <http://tinyurl.com/oxmn476>
def to_entropy(self, words: list[str] | str) -> bytearray:
if not isinstance(words, list):
words = words.split(" ")
if len(words) not in [12, 15, 18, 21, 24]:
raise ValueError(
"Number of words must be one of the following: [12, 15, 18, 21, 24], but it is not (%d)."
% len(words)
)
# Look up all the words in the list and construct the
# concatenation of the original entropy and the checksum.
concatLenBits = len(words) * 11
concatBits = [False] * concatLenBits
wordindex = 0
for word in words:
            # Find the word's index in the wordlist.
            # list.index raises ValueError when the word is missing; re-raise as LookupError.
            try:
                ndx = self.wordlist.index(self.normalize_string(word))
            except ValueError:
                raise LookupError('Unable to find "%s" in word list.' % word)
# Set the next 11 bits to the value of the index.
for ii in range(11):
concatBits[(wordindex * 11) + ii] = (ndx & (1 << (10 - ii))) != 0
wordindex += 1
checksumLengthBits = concatLenBits // 33
entropyLengthBits = concatLenBits - checksumLengthBits
# Extract original entropy as bytes.
entropy = bytearray(entropyLengthBits // 8)
for ii in range(len(entropy)):
for jj in range(8):
if concatBits[(ii * 8) + jj]:
entropy[ii] |= 1 << (7 - jj)
# Take the digest of the entropy.
hashBytes = hashlib.sha256(entropy).digest()
hashBits = list(
itertools.chain.from_iterable(
[c & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes
)
)
# Check all the checksum bits.
for i in range(checksumLengthBits):
if concatBits[entropyLengthBits + i] != hashBits[i]:
raise ValueError("Failed checksum.")
return entropy
def to_mnemonic(self, data: bytes) -> str:
if len(data) not in [16, 20, 24, 28, 32]:
raise ValueError(
f"Data length should be one of the following: [16, 20, 24, 28, 32], but it is not {len(data)}."
)
h = hashlib.sha256(data).hexdigest()
b = (
bin(int.from_bytes(data, byteorder="big"))[2:].zfill(len(data) * 8)
+ bin(int(h, 16))[2:].zfill(256)[: len(data) * 8 // 32]
)
result = []
for i in range(len(b) // 11):
idx = int(b[i * 11 : (i + 1) * 11], 2)
result.append(self.wordlist[idx])
return self.delimiter.join(result)
def check(self, mnemonic: str) -> bool:
mnemonic_list = self.normalize_string(mnemonic).split(" ")
# list of valid mnemonic lengths
if len(mnemonic_list) not in [12, 15, 18, 21, 24]:
return False
try:
idx = map(
lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic_list
)
b = "".join(idx)
except ValueError:
return False
l = len(b) # noqa: E741
d = b[: l // 33 * 32]
h = b[-l // 33 :]
nd = int(d, 2).to_bytes(l // 33 * 4, byteorder="big")
nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[: l // 33]
return h == nh
def expand_word(self, prefix: str) -> str:
if prefix in self.wordlist:
return prefix
else:
matches = [word for word in self.wordlist if word.startswith(prefix)]
if len(matches) == 1: # matched exactly one word in the wordlist
return matches[0]
else:
# exact match not found.
# this is not a validation routine, just return the input
return prefix
def expand(self, mnemonic: str) -> str:
return " ".join(map(self.expand_word, mnemonic.split(" ")))
@classmethod
def to_seed(cls, mnemonic: str, passphrase: str = "") -> bytes:
mnemonic = cls.normalize_string(mnemonic)
passphrase = cls.normalize_string(passphrase)
passphrase = "mnemonic" + passphrase
mnemonic_bytes = mnemonic.encode("utf-8")
passphrase_bytes = passphrase.encode("utf-8")
stretched = hashlib.pbkdf2_hmac(
"sha512", mnemonic_bytes, passphrase_bytes, PBKDF2_ROUNDS
)
return stretched[:64]
@staticmethod
def to_hd_master_key(seed: bytes, testnet: bool = False) -> str:
if len(seed) != 64:
raise ValueError("Provided seed should have length of 64")
# Compute HMAC-SHA512 of seed
seed = hmac.new(b"Bitcoin seed", seed, digestmod=hashlib.sha512).digest()
# Serialization format can be found at: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format
xprv = b"\x04\x88\xad\xe4" # Version for private mainnet
if testnet:
xprv = b"\x04\x35\x83\x94" # Version for private testnet
xprv += b"\x00" * 9 # Depth, parent fingerprint, and child number
xprv += seed[32:] # Chain code
xprv += b"\x00" + seed[:32] # Master key
# Double hash using SHA256
hashed_xprv = hashlib.sha256(xprv).digest()
hashed_xprv = hashlib.sha256(hashed_xprv).digest()
# Append 4 bytes of checksum
xprv += hashed_xprv[:4]
# Return base58
return b58encode(xprv)
def main() -> None:
import sys
if len(sys.argv) > 1:
hex_data = sys.argv[1]
else:
hex_data = sys.stdin.readline().strip()
data = bytes.fromhex(hex_data)
m = Mnemonic("english")
print(m.to_mnemonic(data))
if __name__ == "__main__":
main()

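For orientation, a minimal sketch of how the vendored API chains together, from entropy to a BIP32 master key. The values and assertions are illustrative, not part of the commit:

    from basicswap.contrib.mnemonic import Mnemonic

    m = Mnemonic("english")
    words = m.generate(strength=128)        # 128 entropy bits + 4 checksum bits = 132 = 12 words * 11 bits
    assert m.check(words)                   # checksum bits validate
    entropy = m.to_entropy(words)           # recover the original 16 entropy bytes
    assert m.to_mnemonic(bytes(entropy)) == words
    seed = Mnemonic.to_seed(words)          # 64-byte PBKDF2-HMAC-SHA512 seed (2048 rounds)
    xprv = Mnemonic.to_hd_master_key(seed)  # Base58Check-encoded BIP32 master key
    assert len(seed) == 64 and xprv.startswith("xprv")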
View file

@@ -0,0 +1 @@
# Marker file for PEP 561.

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -8,7 +8,7 @@
from .btc import BTCInterface
from basicswap.chainparams import Coins
from basicswap.util.address import decodeAddress
-from mnemonic import Mnemonic
+from basicswap.contrib.mnemonic import Mnemonic
from basicswap.contrib.test_framework.script import (
CScript,
OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG
@@ -25,11 +25,11 @@ class DASHInterface(BTCInterface):
self._wallet_passphrase = ''
self._have_checked_seed = False
-    def seedToMnemonic(self, key: bytes) -> str:
+    def entropyToMnemonic(self, key: bytes) -> str:
return Mnemonic('english').to_mnemonic(key)
def initialiseWallet(self, key: bytes):
-        words = self.seedToMnemonic(key)
+        words = self.entropyToMnemonic(key)
mnemonic_passphrase = ''
self.rpc_wallet('upgradetohd', [words, mnemonic_passphrase, self._wallet_passphrase])

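The rename reflects BIP39 terminology: the wallet key bytes are the entropy that the words encode, while the "seed" is what PBKDF2 later derives from those words. A hedged sketch of the distinction (values illustrative, not a real key):

    from basicswap.contrib.mnemonic import Mnemonic

    key = bytes(16)                               # example 128-bit entropy, not a real key
    words = Mnemonic('english').to_mnemonic(key)  # entropy -> 12 words
    seed = Mnemonic.to_seed(words)                # words -> 64-byte seed
    assert len(key) == 16 and len(seed) == 64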
View file

@@ -21,6 +21,7 @@ from basicswap.interface.btc import (
)
from basicswap.rpc import make_rpc_func
from basicswap.chainparams import Coins
+from basicswap.contrib.mnemonic import Mnemonic
from basicswap.interface.contrib.nav_test_framework.mininode import (
CTxIn,
CTxOut,
@@ -52,7 +53,6 @@ from basicswap.interface.contrib.nav_test_framework.script import (
SIGHASH_ALL,
SegwitVersion1SignatureHash,
)
-from mnemonic import Mnemonic
class NAVInterface(BTCInterface):
@@ -77,7 +77,7 @@ class NAVInterface(BTCInterface):
# p2sh-p2wsh
return True
-    def seedToMnemonic(self, key: bytes) -> None:
+    def entropyToMnemonic(self, key: bytes) -> None:
return Mnemonic('english').to_mnemonic(key)
def initialiseWallet(self, key):

View file

@@ -688,7 +688,7 @@ def js_getcoinseed(self, url_split, post_string, is_json) -> bytes:
seed_key = swap_client.getWalletKey(coin, 1)
if coin == Coins.DASH:
-        return bytes(json.dumps({'coin': ci.ticker(), 'seed': seed_key.hex(), 'mnemonic': ci.seedToMnemonic(seed_key)}), 'UTF-8')
+        return bytes(json.dumps({'coin': ci.ticker(), 'seed': seed_key.hex(), 'mnemonic': ci.entropyToMnemonic(seed_key)}), 'UTF-8')
seed_id = ci.getSeedHash(seed_key)
return bytes(json.dumps({'coin': ci.ticker(), 'seed': seed_key.hex(), 'seed_id': seed_id.hex()}), 'UTF-8')

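The response shape is unchanged by this commit; only the producing method is renamed. An illustrative sketch of the DASH payload with placeholder values (not real key material):

    import json

    payload = json.dumps({
        'coin': 'DASH',
        'seed': '00' * 32,                # hex-encoded wallet key (placeholder)
        'mnemonic': 'abandon ... about',  # entropyToMnemonic output, elided
    })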
View file

@@ -2,6 +2,7 @@
==============
- Remove protobuf and protoc dependencies.
+- Include mnemonic dependency directly.
0.13.1

View file

@@ -154,7 +154,6 @@
python-gnupg
python-jinja2
python-pysocks
-python-mnemonic
))
(native-inputs
(list

View file

@@ -5,4 +5,3 @@ python-gnupg
Jinja2
pycryptodome
PySocks
-mnemonic

View file

@@ -38,7 +38,6 @@ setuptools.setup(
"Jinja2",
"pycryptodome",
"PySocks",
"mnemonic",
],
entry_points={
"console_scripts": [

View file

@@ -61,7 +61,7 @@ from basicswap.interface.contrib.nav_test_framework.script import (
from bin.basicswap_run import startDaemon
from basicswap.contrib.rpcauth import generate_salt, password_to_hmac
from tests.basicswap.test_xmr import test_delay_event, callnoderpc
-from mnemonic import Mnemonic
+from basicswap.contrib.mnemonic import Mnemonic
from tests.basicswap.test_btc_xmr import TestFunctions
@@ -152,7 +152,6 @@ class Test(TestFunctions):
if not cls.restore_instance:
seed_hex = cls.nav_seeds[i]
mnemonic = Mnemonic('english').to_mnemonic(bytes.fromhex(seed_hex))
-                print('mnemonic', mnemonic)
extra_opts.append(f'-importmnemonic={mnemonic}')
data_dir = prepareDataDir(cfg.TEST_DATADIRS, i, 'navcoin.conf', 'nav_', base_p2p_port=NAV_BASE_PORT, base_rpc_port=NAV_BASE_RPC_PORT)

View file

@@ -22,6 +22,7 @@ from coincurve.ecdsaotves import (
from coincurve.keys import (
PrivateKey)
+from basicswap.contrib.mnemonic import Mnemonic
from basicswap.util import i2b, h2b
from basicswap.util.address import decodeAddress
from basicswap.util.crypto import ripemd160, hash160, blake256
@@ -32,6 +33,7 @@ from basicswap.util.rfc2440 import rfc2440_hash_password
from basicswap.util_xmr import encode_address as xmr_encode_address
from basicswap.interface.btc import BTCInterface
from basicswap.interface.xmr import XMRInterface
+from tests.basicswap.mnemonics import mnemonics
from tests.basicswap.util import REQUIRED_SETTINGS
from basicswap.basicswap_util import (
@@ -489,7 +491,6 @@
assert (blake256(data).hex() == expect_hash)
def test_extkey(self):
test_key = 'XPARHAr37YxmFP8wyjkaHAQWmp84GiyLikL7EL8j9BCx4LkB8Q1Bw5Kr8sA1GA3Ym53zNLcaxxFHr6u81JVTeCaD61c6fKS1YRAuti8Zu5SzJCjh'
test_key_c0 = 'XPARHAt1XMcNYAwP5wEnQXknBAkGSzaetdZt2eoJZehdB4WXfV1xbSjpgHe44AivmumcSejW5KaYx6L5M6MyR1WyXrsWTwaiUEfHq2RrqCfXj3ZW'
test_key_c0_p = 'PPARTKPL4rp5WLnrYP6jZfuRjx6jrmvbsz5QdHofPfFqJdm918mQwdPLq6Dd9TkdbQeKUqjbHWkyzWe7Pftd7itzm7ETEoUMq4cbG4fY9FKH1YSU'
@@ -519,6 +520,12 @@
ek_c0_p_data = decodeAddress(test_key_c0_p)[4:]
assert (m_0.encode_p() == ek_c0_p_data)
+    def test_mnemonic(self):
+        entropy0: bytes = Mnemonic('english').to_entropy(mnemonics[0])
+        assert (entropy0.hex() == '0002207e9b744ea2d7ab41702f31f000')
+        mnemonic_recovered: str = Mnemonic('english').to_mnemonic(entropy0)
+        assert (mnemonic_recovered == mnemonics[0])
if __name__ == '__main__':
unittest.main()
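The new test pins a known entropy-to-mnemonic vector. The vendored detect_language could be exercised the same way; a sketch using the ambiguity example from its own docstring (not part of the commit):

    from basicswap.contrib.mnemonic import Mnemonic

    # 'aboutir' exists only in the french wordlist, so one word suffices.
    assert Mnemonic.detect_language('abandon aboutir') == 'french'
    # 'about' is a prefix of french 'aboutir' but an exact word only in english.
    assert Mnemonic.detect_language('abandon about') == 'english'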