# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import binascii
import collections
import json
import math
import os
import re
from contextlib import contextmanager

import pytest

import six

from cryptography.exceptions import UnsupportedAlgorithm

import cryptography_vectors


HashVector = collections.namedtuple("HashVector", ["message", "digest"])
KeyedHashVector = collections.namedtuple(
    "KeyedHashVector", ["message", "digest", "key"]
)


def check_backend_support(backend, item):
    for mark in item.node.iter_markers("supported"):
        if not mark.kwargs["only_if"](backend):
            pytest.skip("{} ({})".format(
                mark.kwargs["skip_message"], backend
            ))


@contextmanager
def raises_unsupported_algorithm(reason):
    with pytest.raises(UnsupportedAlgorithm) as exc_info:
        yield exc_info

    assert exc_info.value._reason is reason
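

# Illustrative usage sketch, not part of the original module. It assumes the
# private ``_Reasons`` enum from ``cryptography.exceptions``; the algorithm
# name in the message is made up.
def _example_raises_unsupported_algorithm():
    from cryptography.exceptions import _Reasons

    # The assertion inside the context manager passes only when the wrapped
    # code raises UnsupportedAlgorithm with the matching reason.
    with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
        raise UnsupportedAlgorithm(
            "somehash is not supported", _Reasons.UNSUPPORTED_HASH
        )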


def load_vectors_from_file(filename, loader, mode="r"):
    with cryptography_vectors.open_vector_file(filename, mode) as vector_file:
        return loader(vector_file)
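

# Illustrative usage sketch, not part of the original module. The vector path
# below is only an example of the layout used by the cryptography_vectors
# package; any relative path inside that package works the same way.
def _example_load_vectors_from_file():
    return load_vectors_from_file(
        os.path.join("ciphers", "AES", "CBC", "CBCGFSbox128.rsp"),
        load_nist_vectors,
    )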


def load_nist_vectors(vector_data):
    test_data = None
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines, comments, and section headers are ignored
        if not line or line.startswith("#") or (line.startswith("[") and
                                                line.endswith("]")):
            continue

        if line.strip() == "FAIL":
            test_data["fail"] = True
            continue

        # Build our data using a simple Key = Value format
        name, value = [c.strip() for c in line.split("=")]

        # Some tests (PBKDF2) contain \0, which should be interpreted as a
        # null character rather than as a literal backslash followed by a
        # zero.
        value = value.replace("\\0", "\0")

        # COUNT is a special token that indicates a new block of data
        if name.upper() == "COUNT":
            test_data = {}
            data.append(test_data)
            continue
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data
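

# Minimal illustrative example, not part of the original module. The hex
# values below are made up and are not from a real NIST .rsp file; the point
# is to show the "COUNT starts a new block, Key = Value fills it" behaviour.
def _example_load_nist_vectors():
    sample = [
        "# Sample comment",
        "[ENCRYPT]",
        "COUNT = 0",
        "KEY = 00000000000000000000000000000000",
        "PLAINTEXT = 00112233445566778899aabbccddeeff",
        "CIPHERTEXT = ffeeddccbbaa99887766554433221100",
    ]
    assert load_nist_vectors(sample) == [{
        "key": b"00000000000000000000000000000000",
        "plaintext": b"00112233445566778899aabbccddeeff",
        "ciphertext": b"ffeeddccbbaa99887766554433221100",
    }]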


def load_cryptrec_vectors(vector_data):
    cryptrec_list = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments are ignored
        if not line or line.startswith("#"):
            continue

        if line.startswith("K"):
            key = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("P"):
            pt = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("C"):
            ct = line.split(" : ")[1].replace(" ", "").encode("ascii")
            # after a C is found the K+P+C tuple is complete
            # there are many P+C pairs for each K
            cryptrec_list.append({
                "key": key,
                "plaintext": pt,
                "ciphertext": ct
            })
        else:
            raise ValueError("Invalid line in file '{}'".format(line))
    return cryptrec_list


def load_hash_vectors(vector_data):
    vectors = []
    key = None
    msg = None
    md = None

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#") or line.startswith("["):
            continue

        if line.startswith("Len"):
            length = int(line.split(" = ")[1])
        elif line.startswith("Key"):
            # HMAC vectors contain a key attribute. Hash vectors do not.
            key = line.split(" = ")[1].encode("ascii")
        elif line.startswith("Msg"):
            # In the NIST vectors they have chosen to represent an empty
            # string as hex 00, which is of course not actually an empty
            # string. So we parse the provided length and catch this edge
            # case.
            msg = line.split(" = ")[1].encode("ascii") if length > 0 else b""
        elif line.startswith("MD") or line.startswith("Output"):
            md = line.split(" = ")[1]
            # after MD is found the Msg+MD (+ potential key) tuple is complete
            if key is not None:
                vectors.append(KeyedHashVector(msg, md, key))
                key = None
                msg = None
                md = None
            else:
                vectors.append(HashVector(msg, md))
                msg = None
                md = None
        else:
            raise ValueError("Unknown line in hash vector")
    return vectors
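

# Minimal illustrative example, not part of the original module. The digest
# shown is the SHA-1 of the empty string, used only to demonstrate the
# "Len = 0 means the message is really empty" edge case handled above.
def _example_load_hash_vectors():
    sample = [
        "[L = 20]",
        "Len = 0",
        "Msg = 00",
        "MD = da39a3ee5e6b4b0d3255bfef95601890afd80709",
    ]
    assert load_hash_vectors(sample) == [
        HashVector(
            message=b"",
            digest="da39a3ee5e6b4b0d3255bfef95601890afd80709",
        )
    ]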


def load_pkcs1_vectors(vector_data):
    """
    Loads data out of RSA PKCS #1 vector files.
    """
    private_key_vector = None
    public_key_vector = None
    attr = None
    key = None
    example_vector = None
    examples = []
    vectors = []
    for line in vector_data:
        if (
            line.startswith("# PSS Example") or
            line.startswith("# OAEP Example") or
            line.startswith("# PKCS#1 v1.5")
        ):
            if example_vector:
                for key, value in six.iteritems(example_vector):
                    hex_str = "".join(value).replace(" ", "").encode("ascii")
                    example_vector[key] = hex_str
                examples.append(example_vector)

            attr = None
            example_vector = collections.defaultdict(list)

        if line.startswith("# Message"):
            attr = "message"
            continue
        elif line.startswith("# Salt"):
            attr = "salt"
            continue
        elif line.startswith("# Seed"):
            attr = "seed"
            continue
        elif line.startswith("# Signature"):
            attr = "signature"
            continue
        elif line.startswith("# Encryption"):
            attr = "encryption"
            continue
        elif (
            example_vector and
            line.startswith("# =============================================")
        ):
            for key, value in six.iteritems(example_vector):
                hex_str = "".join(value).replace(" ", "").encode("ascii")
                example_vector[key] = hex_str
            examples.append(example_vector)
            example_vector = None
            attr = None
        elif example_vector and line.startswith("#"):
            continue
        else:
            if attr is not None and example_vector is not None:
                example_vector[attr].append(line.strip())
                continue

        if (
            line.startswith("# Example") or
            line.startswith("# =============================================")
        ):
            if key:
                assert private_key_vector
                assert public_key_vector

                for key, value in six.iteritems(public_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    public_key_vector[key] = int(hex_str, 16)

                for key, value in six.iteritems(private_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    private_key_vector[key] = int(hex_str, 16)

                private_key_vector["examples"] = examples
                examples = []

                assert (
                    private_key_vector['public_exponent'] ==
                    public_key_vector['public_exponent']
                )

                assert (
                    private_key_vector['modulus'] ==
                    public_key_vector['modulus']
                )

                vectors.append(
                    (private_key_vector, public_key_vector)
                )

            public_key_vector = collections.defaultdict(list)
            private_key_vector = collections.defaultdict(list)
            key = None
            attr = None

        if private_key_vector is None or public_key_vector is None:
            # Random garbage to defeat CPython's peephole optimizer so that
            # coverage records correctly: https://bugs.python.org/issue2506
            1 + 1
            continue

        if line.startswith("# Private key"):
            key = private_key_vector
        elif line.startswith("# Public key"):
            key = public_key_vector
        elif line.startswith("# Modulus:"):
            attr = "modulus"
        elif line.startswith("# Public exponent:"):
            attr = "public_exponent"
        elif line.startswith("# Exponent:"):
            if key is public_key_vector:
                attr = "public_exponent"
            else:
                assert key is private_key_vector
                attr = "private_exponent"
        elif line.startswith("# Prime 1:"):
            attr = "p"
        elif line.startswith("# Prime 2:"):
            attr = "q"
        elif line.startswith("# Prime exponent 1:"):
            attr = "dmp1"
        elif line.startswith("# Prime exponent 2:"):
            attr = "dmq1"
        elif line.startswith("# Coefficient:"):
            attr = "iqmp"
        elif line.startswith("#"):
            attr = None
        else:
            if key is not None and attr is not None:
                key[attr].append(line.strip())
    return vectors


def load_rsa_nist_vectors(vector_data):
    test_data = None
    p = None
    salt_length = None
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and section headers are ignored
        if not line or line.startswith("["):
            continue

        if line.startswith("# Salt len:"):
            salt_length = int(line.split(":")[1].strip())
            continue
        elif line.startswith("#"):
            continue

        # Build our data using a simple Key = Value format
        name, value = [c.strip() for c in line.split("=")]

        if name == "n":
            n = int(value, 16)
        elif name == "e" and p is None:
            e = int(value, 16)
        elif name == "p":
            p = int(value, 16)
        elif name == "q":
            q = int(value, 16)
        elif name == "SHAAlg":
            if p is None:
                test_data = {
                    "modulus": n,
                    "public_exponent": e,
                    "salt_length": salt_length,
                    "algorithm": value,
                    "fail": False
                }
            else:
                test_data = {
                    "modulus": n,
                    "p": p,
                    "q": q,
                    "algorithm": value
                }
                if salt_length is not None:
                    test_data["salt_length"] = salt_length
            data.append(test_data)
        elif name == "e" and p is not None:
            test_data["public_exponent"] = int(value, 16)
        elif name == "d":
            test_data["private_exponent"] = int(value, 16)
        elif name == "Result":
            test_data["fail"] = value.startswith("F")
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data


def load_fips_dsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS DSA KeyPair vector files.
    """
    vectors = []
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#") or line.startswith("[mod"):
            continue

        if line.startswith("P"):
            vectors.append({'p': int(line.split("=")[1], 16)})
        elif line.startswith("Q"):
            vectors[-1]['q'] = int(line.split("=")[1], 16)
        elif line.startswith("G"):
            vectors[-1]['g'] = int(line.split("=")[1], 16)
        elif line.startswith("X") and 'x' not in vectors[-1]:
            vectors[-1]['x'] = int(line.split("=")[1], 16)
        elif line.startswith("X") and 'x' in vectors[-1]:
            vectors.append({'p': vectors[-1]['p'],
                            'q': vectors[-1]['q'],
                            'g': vectors[-1]['g'],
                            'x': int(line.split("=")[1], 16)
                            })
        elif line.startswith("Y"):
            vectors[-1]['y'] = int(line.split("=")[1], 16)

    return vectors


def load_fips_dsa_sig_vectors(vector_data):
    """
    Loads data out of the FIPS DSA SigVer vector files.
    """
    vectors = []
    sha_regex = re.compile(
        r"\[mod = L=...., N=..., SHA-(?P<sha>1|224|256|384|512)\]"
    )

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        sha_match = sha_regex.match(line)
        if sha_match:
            digest_algorithm = "SHA-{}".format(sha_match.group("sha"))

        if line.startswith("[mod"):
            continue

        name, value = [c.strip() for c in line.split("=")]

        if name == "P":
            vectors.append({'p': int(value, 16),
                            'digest_algorithm': digest_algorithm})
        elif name == "Q":
            vectors[-1]['q'] = int(value, 16)
        elif name == "G":
            vectors[-1]['g'] = int(value, 16)
        elif name == "Msg" and 'msg' not in vectors[-1]:
            hexmsg = value.strip().encode("ascii")
            vectors[-1]['msg'] = binascii.unhexlify(hexmsg)
        elif name == "Msg" and 'msg' in vectors[-1]:
            hexmsg = value.strip().encode("ascii")
            vectors.append({'p': vectors[-1]['p'],
                            'q': vectors[-1]['q'],
                            'g': vectors[-1]['g'],
                            'digest_algorithm':
                                vectors[-1]['digest_algorithm'],
                            'msg': binascii.unhexlify(hexmsg)})
        elif name == "X":
            vectors[-1]['x'] = int(value, 16)
        elif name == "Y":
            vectors[-1]['y'] = int(value, 16)
        elif name == "R":
            vectors[-1]['r'] = int(value, 16)
        elif name == "S":
            vectors[-1]['s'] = int(value, 16)
        elif name == "Result":
            vectors[-1]['result'] = value.split("(")[0].strip()

    return vectors


# https://tools.ietf.org/html/rfc4492#appendix-A
_ECDSA_CURVE_NAMES = {
    "P-192": "secp192r1",
    "P-224": "secp224r1",
    "P-256": "secp256r1",
    "P-384": "secp384r1",
    "P-521": "secp521r1",

    "K-163": "sect163k1",
    "K-233": "sect233k1",
    "K-256": "secp256k1",
    "K-283": "sect283k1",
    "K-409": "sect409k1",
    "K-571": "sect571k1",

    "B-163": "sect163r2",
    "B-233": "sect233r1",
    "B-283": "sect283r1",
    "B-409": "sect409r1",
    "B-571": "sect571r1",
}


def load_fips_ecdsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA KeyPair vector files.
    """
    vectors = []
    key_data = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line[1:-1] in _ECDSA_CURVE_NAMES:
            curve_name = _ECDSA_CURVE_NAMES[line[1:-1]]

        elif line.startswith("d = "):
            if key_data is not None:
                vectors.append(key_data)

            key_data = {
                "curve": curve_name,
                "d": int(line.split("=")[1], 16)
            }

        elif key_data is not None:
            if line.startswith("Qx = "):
                key_data["x"] = int(line.split("=")[1], 16)
            elif line.startswith("Qy = "):
                key_data["y"] = int(line.split("=")[1], 16)

    assert key_data is not None
    vectors.append(key_data)

    return vectors


def load_fips_ecdsa_signing_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA SigGen vector files.
    """
    vectors = []

    curve_rx = re.compile(
        r"\[(?P<curve>[PKB]-[0-9]{3}),SHA-(?P<sha>1|224|256|384|512)\]"
    )

    data = None
    for line in vector_data:
        line = line.strip()

        curve_match = curve_rx.match(line)
        if curve_match:
            curve_name = _ECDSA_CURVE_NAMES[curve_match.group("curve")]
            digest_name = "SHA-{}".format(curve_match.group("sha"))

        elif line.startswith("Msg = "):
            if data is not None:
                vectors.append(data)

            hexmsg = line.split("=")[1].strip().encode("ascii")

            data = {
                "curve": curve_name,
                "digest_algorithm": digest_name,
                "message": binascii.unhexlify(hexmsg)
            }

        elif data is not None:
            if line.startswith("Qx = "):
                data["x"] = int(line.split("=")[1], 16)
            elif line.startswith("Qy = "):
                data["y"] = int(line.split("=")[1], 16)
            elif line.startswith("R = "):
                data["r"] = int(line.split("=")[1], 16)
            elif line.startswith("S = "):
                data["s"] = int(line.split("=")[1], 16)
            elif line.startswith("d = "):
                data["d"] = int(line.split("=")[1], 16)
            elif line.startswith("Result = "):
                data["fail"] = line.split("=")[1].strip()[0] == "F"

    assert data is not None
    vectors.append(data)

    return vectors


def load_kasvs_dh_vectors(vector_data):
    """
    Loads data out of the KASVS key exchange vector data
    """
    result_rx = re.compile(r"([FP]) \(([0-9]+) -")

    vectors = []
    data = {
        "fail_z": False,
        "fail_agree": False
    }

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("P = "):
            data["p"] = int(line.split("=")[1], 16)
        elif line.startswith("Q = "):
            data["q"] = int(line.split("=")[1], 16)
        elif line.startswith("G = "):
            data["g"] = int(line.split("=")[1], 16)
        elif line.startswith("Z = "):
            z_hex = line.split("=")[1].strip().encode("ascii")
            data["z"] = binascii.unhexlify(z_hex)
        elif line.startswith("XstatCAVS = "):
            data["x1"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatCAVS = "):
            data["y1"] = int(line.split("=")[1], 16)
        elif line.startswith("XstatIUT = "):
            data["x2"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatIUT = "):
            data["y2"] = int(line.split("=")[1], 16)
        elif line.startswith("Result = "):
            result_str = line.split("=")[1].strip()
            match = result_rx.match(result_str)

            if match.group(1) == "F":
                if int(match.group(2)) in (5, 10):
                    data["fail_z"] = True
                else:
                    data["fail_agree"] = True

            vectors.append(data)

            data = {
                "p": data["p"],
                "q": data["q"],
                "g": data["g"],
                "fail_z": False,
                "fail_agree": False
            }

    return vectors


def load_kasvs_ecdh_vectors(vector_data):
    """
    Loads data out of the KASVS key exchange vector data
    """
    curve_name_map = {
        "P-192": "secp192r1",
        "P-224": "secp224r1",
        "P-256": "secp256r1",
        "P-384": "secp384r1",
        "P-521": "secp521r1",
    }

    result_rx = re.compile(r"([FP]) \(([0-9]+) -")

    tags = []
    sets = {}
    vectors = []

    # find info in header
    for line in vector_data:
        line = line.strip()

        if line.startswith("#"):
            parm = line.split("Parameter set(s) supported:")
            if len(parm) == 2:
                names = parm[1].strip().split()
                for n in names:
                    tags.append("[%s]" % n)
                break

    # Sets Metadata
    tag = None
    curve = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line in tags:
            tag = line
            curve = None
        elif line.startswith("[Curve selected:"):
            curve = curve_name_map[line.split(':')[1].strip()[:-1]]

        if tag is not None and curve is not None:
            sets[tag.strip("[]")] = curve
            tag = None

        if len(tags) == len(sets):
            break

    # Data
    data = {
        "CAVS": {},
        "IUT": {},
    }
    tag = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("["):
            tag = line.split()[0][1:]
        elif line.startswith("COUNT = "):
            data["COUNT"] = int(line.split("=")[1])
        elif line.startswith("dsCAVS = "):
            data["CAVS"]["d"] = int(line.split("=")[1], 16)
        elif line.startswith("QsCAVSx = "):
            data["CAVS"]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("QsCAVSy = "):
            data["CAVS"]["y"] = int(line.split("=")[1], 16)
        elif line.startswith("dsIUT = "):
            data["IUT"]["d"] = int(line.split("=")[1], 16)
        elif line.startswith("QsIUTx = "):
            data["IUT"]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("QsIUTy = "):
            data["IUT"]["y"] = int(line.split("=")[1], 16)
        elif line.startswith("OI = "):
            data["OI"] = int(line.split("=")[1], 16)
        elif line.startswith("Z = "):
            data["Z"] = int(line.split("=")[1], 16)
        elif line.startswith("DKM = "):
            data["DKM"] = int(line.split("=")[1], 16)
        elif line.startswith("Result = "):
            result_str = line.split("=")[1].strip()
            match = result_rx.match(result_str)

            if match.group(1) == "F":
                data["fail"] = True
            else:
                data["fail"] = False
            data["errno"] = int(match.group(2))

            data["curve"] = sets[tag]

            vectors.append(data)

            data = {
                "CAVS": {},
                "IUT": {},
            }

    return vectors


def load_x963_vectors(vector_data):
    """
    Loads data out of the X9.63 vector data
    """
    vectors = []

    # Sets Metadata
    hashname = None
    vector = {}
    for line in vector_data:
        line = line.strip()

        if line.startswith("[SHA"):
            hashname = line[1:-1]
            shared_secret_len = 0
            shared_info_len = 0
            key_data_len = 0
        elif line.startswith("[shared secret length"):
            shared_secret_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("[SharedInfo length"):
            shared_info_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("[key data length"):
            key_data_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("COUNT"):
            count = int(line.split("=")[1].strip())
            vector["hash"] = hashname
            vector["count"] = count
            vector["shared_secret_length"] = shared_secret_len
            vector["sharedinfo_length"] = shared_info_len
            vector["key_data_length"] = key_data_len
        elif line.startswith("Z"):
            vector["Z"] = line.split("=")[1].strip()
            assert math.ceil(shared_secret_len / 8) * 2 == len(vector["Z"])
        elif line.startswith("SharedInfo"):
            if shared_info_len != 0:
                vector["sharedinfo"] = line.split("=")[1].strip()
                silen = len(vector["sharedinfo"])
                assert math.ceil(shared_info_len / 8) * 2 == silen
        elif line.startswith("key_data"):
            vector["key_data"] = line.split("=")[1].strip()
            assert math.ceil(key_data_len / 8) * 2 == len(vector["key_data"])
            vectors.append(vector)
            vector = {}

    return vectors


def load_nist_kbkdf_vectors(vector_data):
    """
    Load NIST SP 800-108 KDF Vectors
    """
    vectors = []
    test_data = None
    tag = {}

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("[") and line.endswith("]"):
            tag_data = line[1:-1]
            name, value = [c.strip() for c in tag_data.split("=")]
            if value.endswith('_BITS'):
                value = int(value.split('_')[0])
                tag.update({name.lower(): value})
                continue

            tag.update({name.lower(): value.lower()})
        elif line.startswith("COUNT="):
            test_data = {}
            test_data.update(tag)
            vectors.append(test_data)
        elif line.startswith("L"):
            name, value = [c.strip() for c in line.split("=")]
            test_data[name.lower()] = int(value)
        else:
            name, value = [c.strip() for c in line.split("=")]
            test_data[name.lower()] = value.encode("ascii")

    return vectors


def load_ed25519_vectors(vector_data):
    data = []
    for line in vector_data:
        secret_key, public_key, message, signature, _ = line.split(':')
        # In the vectors the first element is secret key + public key
        secret_key = secret_key[0:64]
        # In the vectors the signature section is signature + message
        signature = signature[0:128]
        data.append({
            "secret_key": secret_key,
            "public_key": public_key,
            "message": message,
            "signature": signature
        })

    return data
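

# Illustrative sketch of the colon-delimited line format handled above, not
# part of the original module. The hex strings are schematic placeholders,
# not a real Ed25519 test vector; each line looks like
#     <secret_key_hex + public_key_hex>:<public_key_hex>:<message_hex>:
#     <signature_hex + message_hex>:
# so the loader keeps only the first 64 hex characters of the first field as
# "secret_key" and the first 128 hex characters of the fourth field as
# "signature".
def _example_load_ed25519_vectors():
    sk, pk, msg, sig = "aa" * 32, "bb" * 32, "dd" * 4, "cc" * 64
    line = "{}{}:{}:{}:{}{}:".format(sk, pk, pk, msg, sig, msg)
    assert load_ed25519_vectors([line]) == [{
        "secret_key": sk,
        "public_key": pk,
        "message": msg,
        "signature": sig,
    }]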


def load_nist_ccm_vectors(vector_data):
    test_data = None
    section_data = None
    global_data = {}
    new_section = False
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments should be ignored
        if not line or line.startswith("#"):
            continue

        # Some of the CCM vectors define the Alen/Plen/Nlen/Tlen lengths
        # globally. They are always at the top, before the first section
        # header (see: VADT, VNT, VPT)
        if line.startswith(("Alen", "Plen", "Nlen", "Tlen")):
            name, value = [c.strip() for c in line.split("=")]
            global_data[name.lower()] = int(value)
            continue

        # section headers contain length data we might care about
        if line.startswith("["):
            new_section = True
            section_data = {}
            section = line[1:-1]
            items = [c.strip() for c in section.split(",")]
            for item in items:
                name, value = [c.strip() for c in item.split("=")]
                section_data[name.lower()] = int(value)
            continue

        name, value = [c.strip() for c in line.split("=")]

        if name.lower() in ("key", "nonce") and new_section:
            section_data[name.lower()] = value.encode("ascii")
            continue

        new_section = False

        # Payload is sometimes special because these vectors are absurd. Each
        # example may or may not have a payload. If it does not then the
        # previous example's payload should be used. We accomplish this by
        # writing it into the section_data. Because we update each example
        # with the section data it will be overwritten if a new payload value
        # is present. NIST should be ashamed of their vector creation.
        if name.lower() == "payload":
            section_data[name.lower()] = value.encode("ascii")

        # Result is a special token telling us if the test should pass/fail.
        # This is only present in the DVPT CCM tests
        if name.lower() == "result":
            if value.lower() == "pass":
                test_data["fail"] = False
            else:
                test_data["fail"] = True
            continue

        # COUNT is a special token that indicates a new block of data
        if name.lower() == "count":
            test_data = {}
            test_data.update(global_data)
            test_data.update(section_data)
            data.append(test_data)
            continue
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data
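

# Minimal illustrative example of the section / COUNT handling above, not
# part of the original module. The hex values are made up and are not real
# CCM vectors; the second COUNT block has no Payload line, so it inherits the
# payload that the first block wrote into section_data.
def _example_load_nist_ccm_vectors():
    sample = [
        "Alen = 0",
        "[Plen = 1, Nlen = 7, Tlen = 4]",
        "Key = 0011223344556677889900aabbccddee",
        "Nonce = 00000000000000",
        "COUNT = 0",
        "Payload = ab",
        "CT = 1234567890",
        "COUNT = 1",
        "CT = 0987654321",
    ]
    parsed = load_nist_ccm_vectors(sample)
    assert parsed[0]["payload"] == b"ab"
    assert parsed[1]["payload"] == b"ab"
    assert parsed[1]["ct"] == b"0987654321"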


class WycheproofTest(object):
    def __init__(self, testgroup, testcase):
        self.testgroup = testgroup
        self.testcase = testcase

    def __repr__(self):
        return "<WycheproofTest({!r}, {!r}, tcId={})>".format(
            self.testgroup, self.testcase, self.testcase["tcId"],
        )

    @property
    def valid(self):
        return self.testcase["result"] == "valid"

    @property
    def acceptable(self):
        return self.testcase["result"] == "acceptable"

    @property
    def invalid(self):
        return self.testcase["result"] == "invalid"

    def has_flag(self, flag):
        return flag in self.testcase["flags"]


def skip_if_wycheproof_none(wycheproof):
    # This is factored into its own function so we can easily test both
    # branches
    if wycheproof is None:
        pytest.skip("--wycheproof-root not provided")


def load_wycheproof_tests(wycheproof, test_file):
    path = os.path.join(wycheproof, "testvectors", test_file)
    with open(path) as f:
        data = json.load(f)
        for group in data["testGroups"]:
            cases = group.pop("tests")
            for c in cases:
                yield WycheproofTest(group, c)
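

# Illustrative usage sketch, not part of the original module. The file name
# below is only an example of the Wycheproof "testvectors" layout; it assumes
# a JSON file with the usual testGroups/tests structure checked out under
# wycheproof_root.
def _example_load_wycheproof_tests(wycheproof_root):
    for test in load_wycheproof_tests(wycheproof_root, "x25519_test.json"):
        if test.valid or test.acceptable:
            # each WycheproofTest exposes its raw group and case dicts
            print(test.testcase["tcId"], test.testcase.get("comment", ""))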