utils.py

# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import binascii
import collections
import json
import os
import re
from contextlib import contextmanager

import pytest

import six

from cryptography.exceptions import UnsupportedAlgorithm

import cryptography_vectors


HashVector = collections.namedtuple("HashVector", ["message", "digest"])
KeyedHashVector = collections.namedtuple(
    "KeyedHashVector", ["message", "digest", "key"]
)


def check_backend_support(backend, item):
    for mark in item.node.iter_markers("supported"):
        if not mark.kwargs["only_if"](backend):
            pytest.skip(
                "{} ({})".format(mark.kwargs["skip_message"], backend)
            )


@contextmanager
def raises_unsupported_algorithm(reason):
    with pytest.raises(UnsupportedAlgorithm) as exc_info:
        yield exc_info

    assert exc_info.value._reason is reason
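
# Illustrative usage sketch (assumes _Reasons from cryptography.exceptions,
# as used elsewhere in this test suite): the wrapped block must raise
# UnsupportedAlgorithm carrying the expected private _reason, e.g.
#
#   with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
#       raise UnsupportedAlgorithm("no such hash", _Reasons.UNSUPPORTED_HASH)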


def load_vectors_from_file(filename, loader, mode="r"):
    with cryptography_vectors.open_vector_file(filename, mode) as vector_file:
        return loader(vector_file)
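
# Illustrative usage sketch (hypothetical vector path): the loader callable
# is handed the opened vector file and its return value is passed straight
# back, e.g.
#
#   vectors = load_vectors_from_file(
#       os.path.join("ciphers", "AES", "CBC", "CBCGFSbox128.rsp"),
#       load_nist_vectors,
#   )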


def load_nist_vectors(vector_data):
    test_data = None
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines, comments, and section headers are ignored
        if (
            not line
            or line.startswith("#")
            or (line.startswith("[") and line.endswith("]"))
        ):
            continue

        if line.strip() == "FAIL":
            test_data["fail"] = True
            continue

        # Build our data using a simple Key = Value format
        name, value = [c.strip() for c in line.split("=")]

        # Some tests (PBKDF2) contain \0, which should be interpreted as a
        # null character rather than literal.
        value = value.replace("\\0", "\0")

        # COUNT is a special token that indicates a new block of data
        if name.upper() == "COUNT":
            test_data = {}
            data.append(test_data)
            continue
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data
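

def _example_load_nist_vectors():
    # Illustrative sketch only (hypothetical hex values, not a real vector
    # file): load_nist_vectors emits one dict per COUNT block, with names
    # lower-cased and values kept as ASCII bytes.
    sample = [
        "COUNT = 0",
        "KEY = 9791c1a2",
        "CIPHERTEXT = 57d2",
    ]
    assert load_nist_vectors(sample) == [
        {"key": b"9791c1a2", "ciphertext": b"57d2"}
    ]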


def load_cryptrec_vectors(vector_data):
    cryptrec_list = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments are ignored
        if not line or line.startswith("#"):
            continue

        if line.startswith("K"):
            key = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("P"):
            pt = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("C"):
            ct = line.split(" : ")[1].replace(" ", "").encode("ascii")
            # after a C is found the K+P+C tuple is complete
            # there are many P+C pairs for each K
            cryptrec_list.append(
                {"key": key, "plaintext": pt, "ciphertext": ct}
            )
        else:
            raise ValueError("Invalid line in file '{}'".format(line))

    return cryptrec_list
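

def _example_load_cryptrec_vectors():
    # Illustrative sketch only (hypothetical hex values): spaces are stripped
    # from each "K/P/C : ..." line and a dict is emitted when the C line
    # completes a key/plaintext/ciphertext tuple.
    sample = [
        "K : 00 01 02 03",
        "P : 10 11 12 13",
        "C : 20 21 22 23",
    ]
    assert load_cryptrec_vectors(sample) == [
        {
            "key": b"00010203",
            "plaintext": b"10111213",
            "ciphertext": b"20212223",
        }
    ]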


def load_hash_vectors(vector_data):
    vectors = []
    key = None
    msg = None
    md = None

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#") or line.startswith("["):
            continue

        if line.startswith("Len"):
            length = int(line.split(" = ")[1])
        elif line.startswith("Key"):
            # HMAC vectors contain a key attribute. Hash vectors do not.
            key = line.split(" = ")[1].encode("ascii")
        elif line.startswith("Msg"):
            # In the NIST vectors they have chosen to represent an empty
            # string as hex 00, which is of course not actually an empty
            # string. So we parse the provided length and catch this edge case.
            msg = line.split(" = ")[1].encode("ascii") if length > 0 else b""
        elif line.startswith("MD") or line.startswith("Output"):
            md = line.split(" = ")[1]
            # after MD is found the Msg+MD (+ potential key) tuple is complete
            if key is not None:
                vectors.append(KeyedHashVector(msg, md, key))
                key = None
                msg = None
                md = None
            else:
                vectors.append(HashVector(msg, md))
                msg = None
                md = None
        else:
            raise ValueError("Unknown line in hash vector")

    return vectors
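

def _example_load_hash_vectors():
    # Illustrative sketch only (hypothetical values): without a Key line a
    # plain HashVector is produced; the digest is kept as a text string.
    sample = [
        "Len = 16",
        "Msg = 6162",
        "MD = 0123abcd",
    ]
    assert load_hash_vectors(sample) == [HashVector(b"6162", "0123abcd")]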


def load_pkcs1_vectors(vector_data):
    """
    Loads data out of RSA PKCS #1 vector files.
    """
    private_key_vector = None
    public_key_vector = None
    attr = None
    key = None
    example_vector = None
    examples = []
    vectors = []
    for line in vector_data:
        if (
            line.startswith("# PSS Example")
            or line.startswith("# OAEP Example")
            or line.startswith("# PKCS#1 v1.5")
        ):
            if example_vector:
                for key, value in six.iteritems(example_vector):
                    hex_str = "".join(value).replace(" ", "").encode("ascii")
                    example_vector[key] = hex_str
                examples.append(example_vector)

            attr = None
            example_vector = collections.defaultdict(list)

        if line.startswith("# Message"):
            attr = "message"
            continue
        elif line.startswith("# Salt"):
            attr = "salt"
            continue
        elif line.startswith("# Seed"):
            attr = "seed"
            continue
        elif line.startswith("# Signature"):
            attr = "signature"
            continue
        elif line.startswith("# Encryption"):
            attr = "encryption"
            continue
        elif example_vector and line.startswith(
            "# ============================================="
        ):
            for key, value in six.iteritems(example_vector):
                hex_str = "".join(value).replace(" ", "").encode("ascii")
                example_vector[key] = hex_str
            examples.append(example_vector)
            example_vector = None
            attr = None
        elif example_vector and line.startswith("#"):
            continue
        else:
            if attr is not None and example_vector is not None:
                example_vector[attr].append(line.strip())
                continue

        if line.startswith("# Example") or line.startswith(
            "# ============================================="
        ):
            if key:
                assert private_key_vector
                assert public_key_vector

                for key, value in six.iteritems(public_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    public_key_vector[key] = int(hex_str, 16)

                for key, value in six.iteritems(private_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    private_key_vector[key] = int(hex_str, 16)

                private_key_vector["examples"] = examples
                examples = []

                assert (
                    private_key_vector["public_exponent"]
                    == public_key_vector["public_exponent"]
                )

                assert (
                    private_key_vector["modulus"]
                    == public_key_vector["modulus"]
                )

                vectors.append((private_key_vector, public_key_vector))

            public_key_vector = collections.defaultdict(list)
            private_key_vector = collections.defaultdict(list)
            key = None
            attr = None

        if private_key_vector is None or public_key_vector is None:
            # Random garbage to defeat CPython's peephole optimizer so that
            # coverage records correctly: https://bugs.python.org/issue2506
            1 + 1
            continue

        if line.startswith("# Private key"):
            key = private_key_vector
        elif line.startswith("# Public key"):
            key = public_key_vector
        elif line.startswith("# Modulus:"):
            attr = "modulus"
        elif line.startswith("# Public exponent:"):
            attr = "public_exponent"
        elif line.startswith("# Exponent:"):
            if key is public_key_vector:
                attr = "public_exponent"
            else:
                assert key is private_key_vector
                attr = "private_exponent"
        elif line.startswith("# Prime 1:"):
            attr = "p"
        elif line.startswith("# Prime 2:"):
            attr = "q"
        elif line.startswith("# Prime exponent 1:"):
            attr = "dmp1"
        elif line.startswith("# Prime exponent 2:"):
            attr = "dmq1"
        elif line.startswith("# Coefficient:"):
            attr = "iqmp"
        elif line.startswith("#"):
            attr = None
        else:
            if key is not None and attr is not None:
                key[attr].append(line.strip())
    return vectors


def load_rsa_nist_vectors(vector_data):
    test_data = None
    p = None
    salt_length = None
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and section headers are ignored
        if not line or line.startswith("["):
            continue

        if line.startswith("# Salt len:"):
            salt_length = int(line.split(":")[1].strip())
            continue
        elif line.startswith("#"):
            continue

        # Build our data using a simple Key = Value format
        name, value = [c.strip() for c in line.split("=")]
        if name == "n":
            n = int(value, 16)
        elif name == "e" and p is None:
            e = int(value, 16)
        elif name == "p":
            p = int(value, 16)
        elif name == "q":
            q = int(value, 16)
        elif name == "SHAAlg":
            if p is None:
                test_data = {
                    "modulus": n,
                    "public_exponent": e,
                    "salt_length": salt_length,
                    "algorithm": value,
                    "fail": False,
                }
            else:
                test_data = {"modulus": n, "p": p, "q": q, "algorithm": value}
                if salt_length is not None:
                    test_data["salt_length"] = salt_length
            data.append(test_data)
        elif name == "e" and p is not None:
            test_data["public_exponent"] = int(value, 16)
        elif name == "d":
            test_data["private_exponent"] = int(value, 16)
        elif name == "Result":
            test_data["fail"] = value.startswith("F")
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data
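

def _example_load_rsa_nist_vectors():
    # Illustrative sketch only (hypothetical, unrealistically small values):
    # the "# Salt len:" comment and the n/e lines feed into the dict that is
    # started when SHAAlg is seen; later Key = Value lines are attached to it.
    sample = [
        "# Salt len: 20",
        "n = 0f",
        "e = 3",
        "SHAAlg = SHA256",
        "Msg = 6162",
        "S = 0123",
    ]
    assert load_rsa_nist_vectors(sample) == [
        {
            "modulus": 15,
            "public_exponent": 3,
            "salt_length": 20,
            "algorithm": "SHA256",
            "fail": False,
            "msg": b"6162",
            "s": b"0123",
        }
    ]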


def load_fips_dsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS DSA KeyPair vector files.
    """
    vectors = []
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#") or line.startswith("[mod"):
            continue

        if line.startswith("P"):
            vectors.append({"p": int(line.split("=")[1], 16)})
        elif line.startswith("Q"):
            vectors[-1]["q"] = int(line.split("=")[1], 16)
        elif line.startswith("G"):
            vectors[-1]["g"] = int(line.split("=")[1], 16)
        elif line.startswith("X") and "x" not in vectors[-1]:
            vectors[-1]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("X") and "x" in vectors[-1]:
            vectors.append(
                {
                    "p": vectors[-1]["p"],
                    "q": vectors[-1]["q"],
                    "g": vectors[-1]["g"],
                    "x": int(line.split("=")[1], 16),
                }
            )
        elif line.startswith("Y"):
            vectors[-1]["y"] = int(line.split("=")[1], 16)

    return vectors
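

def _example_load_fips_dsa_key_pair_vectors():
    # Illustrative sketch only (hypothetical, unrealistically small values):
    # a new dict starts at each P line and the [mod ...] header is skipped.
    sample = [
        "[mod = L=2048, N=256]",
        "P = 0d",
        "Q = 05",
        "G = 02",
        "X = 03",
        "Y = 08",
    ]
    assert load_fips_dsa_key_pair_vectors(sample) == [
        {"p": 13, "q": 5, "g": 2, "x": 3, "y": 8}
    ]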


def load_fips_dsa_sig_vectors(vector_data):
    """
    Loads data out of the FIPS DSA SigVer vector files.
    """
    vectors = []
    sha_regex = re.compile(
        r"\[mod = L=...., N=..., SHA-(?P<sha>1|224|256|384|512)\]"
    )

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        sha_match = sha_regex.match(line)
        if sha_match:
            digest_algorithm = "SHA-{}".format(sha_match.group("sha"))

        if line.startswith("[mod"):
            continue

        name, value = [c.strip() for c in line.split("=")]

        if name == "P":
            vectors.append(
                {"p": int(value, 16), "digest_algorithm": digest_algorithm}
            )
        elif name == "Q":
            vectors[-1]["q"] = int(value, 16)
        elif name == "G":
            vectors[-1]["g"] = int(value, 16)
        elif name == "Msg" and "msg" not in vectors[-1]:
            hexmsg = value.strip().encode("ascii")
            vectors[-1]["msg"] = binascii.unhexlify(hexmsg)
        elif name == "Msg" and "msg" in vectors[-1]:
            hexmsg = value.strip().encode("ascii")
            vectors.append(
                {
                    "p": vectors[-1]["p"],
                    "q": vectors[-1]["q"],
                    "g": vectors[-1]["g"],
                    "digest_algorithm": vectors[-1]["digest_algorithm"],
                    "msg": binascii.unhexlify(hexmsg),
                }
            )
        elif name == "X":
            vectors[-1]["x"] = int(value, 16)
        elif name == "Y":
            vectors[-1]["y"] = int(value, 16)
        elif name == "R":
            vectors[-1]["r"] = int(value, 16)
        elif name == "S":
            vectors[-1]["s"] = int(value, 16)
        elif name == "Result":
            vectors[-1]["result"] = value.split("(")[0].strip()

    return vectors


# https://tools.ietf.org/html/rfc4492#appendix-A
_ECDSA_CURVE_NAMES = {
    "P-192": "secp192r1",
    "P-224": "secp224r1",
    "P-256": "secp256r1",
    "P-384": "secp384r1",
    "P-521": "secp521r1",
    "K-163": "sect163k1",
    "K-233": "sect233k1",
    "K-256": "secp256k1",
    "K-283": "sect283k1",
    "K-409": "sect409k1",
    "K-571": "sect571k1",
    "B-163": "sect163r2",
    "B-233": "sect233r1",
    "B-283": "sect283r1",
    "B-409": "sect409r1",
    "B-571": "sect571r1",
}


def load_fips_ecdsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA KeyPair vector files.
    """
    vectors = []
    key_data = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line[1:-1] in _ECDSA_CURVE_NAMES:
            curve_name = _ECDSA_CURVE_NAMES[line[1:-1]]
        elif line.startswith("d = "):
            if key_data is not None:
                vectors.append(key_data)

            key_data = {"curve": curve_name, "d": int(line.split("=")[1], 16)}
        elif key_data is not None:
            if line.startswith("Qx = "):
                key_data["x"] = int(line.split("=")[1], 16)
            elif line.startswith("Qy = "):
                key_data["y"] = int(line.split("=")[1], 16)

    assert key_data is not None
    vectors.append(key_data)

    return vectors


def load_fips_ecdsa_signing_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA SigGen vector files.
    """
    vectors = []
    curve_rx = re.compile(
        r"\[(?P<curve>[PKB]-[0-9]{3}),SHA-(?P<sha>1|224|256|384|512)\]"
    )

    data = None
    for line in vector_data:
        line = line.strip()

        curve_match = curve_rx.match(line)
        if curve_match:
            curve_name = _ECDSA_CURVE_NAMES[curve_match.group("curve")]
            digest_name = "SHA-{}".format(curve_match.group("sha"))
        elif line.startswith("Msg = "):
            if data is not None:
                vectors.append(data)

            hexmsg = line.split("=")[1].strip().encode("ascii")
            data = {
                "curve": curve_name,
                "digest_algorithm": digest_name,
                "message": binascii.unhexlify(hexmsg),
            }
        elif data is not None:
            if line.startswith("Qx = "):
                data["x"] = int(line.split("=")[1], 16)
            elif line.startswith("Qy = "):
                data["y"] = int(line.split("=")[1], 16)
            elif line.startswith("R = "):
                data["r"] = int(line.split("=")[1], 16)
            elif line.startswith("S = "):
                data["s"] = int(line.split("=")[1], 16)
            elif line.startswith("d = "):
                data["d"] = int(line.split("=")[1], 16)
            elif line.startswith("Result = "):
                data["fail"] = line.split("=")[1].strip()[0] == "F"

    assert data is not None
    vectors.append(data)
    return vectors


def load_kasvs_dh_vectors(vector_data):
    """
    Loads data out of the KASVS key exchange vector data
    """
    result_rx = re.compile(r"([FP]) \(([0-9]+) -")

    vectors = []
    data = {"fail_z": False, "fail_agree": False}

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("P = "):
            data["p"] = int(line.split("=")[1], 16)
        elif line.startswith("Q = "):
            data["q"] = int(line.split("=")[1], 16)
        elif line.startswith("G = "):
            data["g"] = int(line.split("=")[1], 16)
        elif line.startswith("Z = "):
            z_hex = line.split("=")[1].strip().encode("ascii")
            data["z"] = binascii.unhexlify(z_hex)
        elif line.startswith("XstatCAVS = "):
            data["x1"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatCAVS = "):
            data["y1"] = int(line.split("=")[1], 16)
        elif line.startswith("XstatIUT = "):
            data["x2"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatIUT = "):
            data["y2"] = int(line.split("=")[1], 16)
        elif line.startswith("Result = "):
            result_str = line.split("=")[1].strip()
            match = result_rx.match(result_str)

            if match.group(1) == "F":
                if int(match.group(2)) in (5, 10):
                    data["fail_z"] = True
                else:
                    data["fail_agree"] = True

            vectors.append(data)

            data = {
                "p": data["p"],
                "q": data["q"],
                "g": data["g"],
                "fail_z": False,
                "fail_agree": False,
            }

    return vectors


def load_kasvs_ecdh_vectors(vector_data):
    """
    Loads data out of the KASVS key exchange vector data
    """
    curve_name_map = {
        "P-192": "secp192r1",
        "P-224": "secp224r1",
        "P-256": "secp256r1",
        "P-384": "secp384r1",
        "P-521": "secp521r1",
    }

    result_rx = re.compile(r"([FP]) \(([0-9]+) -")

    tags = []
    sets = {}
    vectors = []

    # find info in header
    for line in vector_data:
        line = line.strip()

        if line.startswith("#"):
            parm = line.split("Parameter set(s) supported:")
            if len(parm) == 2:
                names = parm[1].strip().split()
                for n in names:
                    tags.append("[%s]" % n)
                break

    # Sets Metadata
    tag = None
    curve = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line in tags:
            tag = line
            curve = None
        elif line.startswith("[Curve selected:"):
            curve = curve_name_map[line.split(":")[1].strip()[:-1]]

        if tag is not None and curve is not None:
            sets[tag.strip("[]")] = curve
            tag = None
        if len(tags) == len(sets):
            break

    # Data
    data = {
        "CAVS": {},
        "IUT": {},
    }
    tag = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("["):
            tag = line.split()[0][1:]
        elif line.startswith("COUNT = "):
            data["COUNT"] = int(line.split("=")[1])
        elif line.startswith("dsCAVS = "):
            data["CAVS"]["d"] = int(line.split("=")[1], 16)
        elif line.startswith("QsCAVSx = "):
            data["CAVS"]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("QsCAVSy = "):
            data["CAVS"]["y"] = int(line.split("=")[1], 16)
        elif line.startswith("dsIUT = "):
            data["IUT"]["d"] = int(line.split("=")[1], 16)
        elif line.startswith("QsIUTx = "):
            data["IUT"]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("QsIUTy = "):
            data["IUT"]["y"] = int(line.split("=")[1], 16)
        elif line.startswith("OI = "):
            data["OI"] = int(line.split("=")[1], 16)
        elif line.startswith("Z = "):
            data["Z"] = int(line.split("=")[1], 16)
        elif line.startswith("DKM = "):
            data["DKM"] = int(line.split("=")[1], 16)
        elif line.startswith("Result = "):
            result_str = line.split("=")[1].strip()
            match = result_rx.match(result_str)

            if match.group(1) == "F":
                data["fail"] = True
            else:
                data["fail"] = False
            data["errno"] = int(match.group(2))

            data["curve"] = sets[tag]

            vectors.append(data)

            data = {
                "CAVS": {},
                "IUT": {},
            }

    return vectors


def load_x963_vectors(vector_data):
    """
    Loads data out of the X9.63 vector data
    """
    vectors = []

    # Sets Metadata
    hashname = None
    vector = {}
    for line in vector_data:
        line = line.strip()

        if line.startswith("[SHA"):
            hashname = line[1:-1]
            shared_secret_len = 0
            shared_info_len = 0
            key_data_len = 0
        elif line.startswith("[shared secret length"):
            shared_secret_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("[SharedInfo length"):
            shared_info_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("[key data length"):
            key_data_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("COUNT"):
            count = int(line.split("=")[1].strip())
            vector["hash"] = hashname
            vector["count"] = count
            vector["shared_secret_length"] = shared_secret_len
            vector["sharedinfo_length"] = shared_info_len
            vector["key_data_length"] = key_data_len
        elif line.startswith("Z"):
            vector["Z"] = line.split("=")[1].strip()
            assert ((shared_secret_len + 7) // 8) * 2 == len(vector["Z"])
        elif line.startswith("SharedInfo"):
            if shared_info_len != 0:
                vector["sharedinfo"] = line.split("=")[1].strip()
                silen = len(vector["sharedinfo"])
                assert ((shared_info_len + 7) // 8) * 2 == silen
        elif line.startswith("key_data"):
            vector["key_data"] = line.split("=")[1].strip()
            assert ((key_data_len + 7) // 8) * 2 == len(vector["key_data"])
            vectors.append(vector)
            vector = {}

    return vectors


def load_nist_kbkdf_vectors(vector_data):
    """
    Load NIST SP 800-108 KDF Vectors
    """
    vectors = []
    test_data = None
    tag = {}

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("[") and line.endswith("]"):
            tag_data = line[1:-1]
            name, value = [c.strip() for c in tag_data.split("=")]
            if value.endswith("_BITS"):
                value = int(value.split("_")[0])
                tag.update({name.lower(): value})
                continue

            tag.update({name.lower(): value.lower()})
        elif line.startswith("COUNT="):
            test_data = {}
            test_data.update(tag)
            vectors.append(test_data)
        elif line.startswith("L"):
            name, value = [c.strip() for c in line.split("=")]
            test_data[name.lower()] = int(value)
        else:
            name, value = [c.strip() for c in line.split("=")]
            test_data[name.lower()] = value.encode("ascii")

    return vectors
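

def _example_load_nist_kbkdf_vectors():
    # Illustrative sketch only (hypothetical values patterned on the CAVP
    # bracketed headers): _BITS tags become ints, other tags are lower-cased,
    # and every COUNT block inherits the current tag metadata.
    sample = [
        "[PRF=HMAC_SHA256]",
        "[CTRLOCATION=BEFORE_FIXED]",
        "[RLEN=8_BITS]",
        "COUNT=0",
        "L = 128",
        "KI = 00aa",
    ]
    assert load_nist_kbkdf_vectors(sample) == [
        {
            "prf": "hmac_sha256",
            "ctrlocation": "before_fixed",
            "rlen": 8,
            "l": 128,
            "ki": b"00aa",
        }
    ]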


def load_ed25519_vectors(vector_data):
    data = []
    for line in vector_data:
        secret_key, public_key, message, signature, _ = line.split(":")
        # In the vectors the first element is secret key + public key
        secret_key = secret_key[0:64]
        # In the vectors the signature section is signature + message
        signature = signature[0:128]
        data.append(
            {
                "secret_key": secret_key,
                "public_key": public_key,
                "message": message,
                "signature": signature,
            }
        )
    return data
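
# Format sketch (inferred from the slicing above): each colon-separated line
# looks like
#
#   <sk_hex + pk_hex>:<pk_hex>:<msg_hex>:<sig_hex + msg_hex>:
#
# so the secret key is the first 64 hex characters of the first field and the
# signature is the first 128 hex characters of the fourth field.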


def load_nist_ccm_vectors(vector_data):
    test_data = None
    section_data = None
    global_data = {}
    new_section = False
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments should be ignored
        if not line or line.startswith("#"):
            continue

        # Some of the CCM vectors have global values for this. They are always
        # at the top before the first section header (see: VADT, VNT, VPT)
        if line.startswith(("Alen", "Plen", "Nlen", "Tlen")):
            name, value = [c.strip() for c in line.split("=")]
            global_data[name.lower()] = int(value)
            continue

        # section headers contain length data we might care about
        if line.startswith("["):
            new_section = True
            section_data = {}
            section = line[1:-1]
            items = [c.strip() for c in section.split(",")]
            for item in items:
                name, value = [c.strip() for c in item.split("=")]
                section_data[name.lower()] = int(value)
            continue

        name, value = [c.strip() for c in line.split("=")]

        if name.lower() in ("key", "nonce") and new_section:
            section_data[name.lower()] = value.encode("ascii")
            continue

        new_section = False

        # Payload is sometimes special because these vectors are absurd. Each
        # example may or may not have a payload. If it does not then the
        # previous example's payload should be used. We accomplish this by
        # writing it into the section_data. Because we update each example
        # with the section data it will be overwritten if a new payload value
        # is present. NIST should be ashamed of their vector creation.
        if name.lower() == "payload":
            section_data[name.lower()] = value.encode("ascii")

        # Result is a special token telling us if the test should pass/fail.
        # This is only present in the DVPT CCM tests
        if name.lower() == "result":
            if value.lower() == "pass":
                test_data["fail"] = False
            else:
                test_data["fail"] = True
            continue
        # COUNT is a special token that indicates a new block of data
        if name.lower() == "count":
            test_data = {}
            test_data.update(global_data)
            test_data.update(section_data)
            data.append(test_data)
            continue
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data


class WycheproofTest(object):
    def __init__(self, testfiledata, testgroup, testcase):
        self.testfiledata = testfiledata
        self.testgroup = testgroup
        self.testcase = testcase

    def __repr__(self):
        return "<WycheproofTest({!r}, {!r}, {!r}, tcId={})>".format(
            self.testfiledata,
            self.testgroup,
            self.testcase,
            self.testcase["tcId"],
        )

    @property
    def valid(self):
        return self.testcase["result"] == "valid"

    @property
    def acceptable(self):
        return self.testcase["result"] == "acceptable"

    @property
    def invalid(self):
        return self.testcase["result"] == "invalid"

    def has_flag(self, flag):
        return flag in self.testcase["flags"]


def load_wycheproof_tests(wycheproof, test_file):
    path = os.path.join(wycheproof, "testvectors", test_file)
    with open(path) as f:
        data = json.load(f)
        for group in data.pop("testGroups"):
            cases = group.pop("tests")
            for c in cases:
                yield WycheproofTest(data, group, c)
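
# Illustrative usage sketch (hypothetical Wycheproof file name): one
# WycheproofTest is yielded per test case across all groups, e.g.
#
#   for test in load_wycheproof_tests(wycheproof_dir, "x25519_test.json"):
#       if test.valid or test.acceptable:
#           ...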