utils.py

# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import binascii
import collections
import math
import re
from contextlib import contextmanager

import pytest

import six

from cryptography.exceptions import UnsupportedAlgorithm

import cryptography_vectors


HashVector = collections.namedtuple("HashVector", ["message", "digest"])
KeyedHashVector = collections.namedtuple(
    "KeyedHashVector", ["message", "digest", "key"]
)


def check_backend_support(backend, item):
    supported = item.keywords.get("supported")
    if supported:
        for mark in supported:
            if not mark.kwargs["only_if"](backend):
                pytest.skip("{0} ({1})".format(
                    mark.kwargs["skip_message"], backend
                ))


@contextmanager
def raises_unsupported_algorithm(reason):
    with pytest.raises(UnsupportedAlgorithm) as exc_info:
        yield exc_info

    assert exc_info.value._reason is reason


def load_vectors_from_file(filename, loader, mode="r"):
    with cryptography_vectors.open_vector_file(filename, mode) as vector_file:
        return loader(vector_file)


def load_nist_vectors(vector_data):
    test_data = None
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines, comments, and section headers are ignored
        if not line or line.startswith("#") or (line.startswith("[") and
                                                line.endswith("]")):
            continue

        if line.strip() == "FAIL":
            test_data["fail"] = True
            continue

        # Build our data using a simple Key = Value format
        name, value = [c.strip() for c in line.split("=")]

        # Some tests (PBKDF2) contain \0, which should be interpreted as a
        # null character rather than literal.
        value = value.replace("\\0", "\0")

        # COUNT is a special token that indicates a new block of data
        if name.upper() == "COUNT":
            test_data = {}
            data.append(test_data)
            continue
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data
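

# Illustrative sketch only (not used by the test suite): a hypothetical,
# hand-written sample of the NIST "Key = Value" response-file format that
# load_nist_vectors parses. The hex values below are made up. Each COUNT
# starts a new dict; every other name is stored lowercased with its value
# encoded as ASCII bytes.
def _example_load_nist_vectors():
    sample = [
        "# Sample NIST-style vector file",
        "[ENCRYPT]",
        "COUNT = 0",
        "KEY = 00000000000000000000000000000000",
        "PLAINTEXT = 0123456789abcdef0123456789abcdef",
        "CIPHERTEXT = fedcba9876543210fedcba9876543210",
    ]
    # Returns: [{"key": b"0000...", "plaintext": b"0123...",
    #            "ciphertext": b"fedc..."}]
    return load_nist_vectors(sample)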


def load_cryptrec_vectors(vector_data):
    cryptrec_list = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments are ignored
        if not line or line.startswith("#"):
            continue

        if line.startswith("K"):
            key = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("P"):
            pt = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("C"):
            ct = line.split(" : ")[1].replace(" ", "").encode("ascii")
            # after a C is found the K+P+C tuple is complete
            # there are many P+C pairs for each K
            cryptrec_list.append({
                "key": key,
                "plaintext": pt,
                "ciphertext": ct
            })
        else:
            raise ValueError("Invalid line in file '{}'".format(line))
    return cryptrec_list


def load_hash_vectors(vector_data):
    vectors = []
    key = None
    msg = None
    md = None

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#") or line.startswith("["):
            continue

        if line.startswith("Len"):
            length = int(line.split(" = ")[1])
        elif line.startswith("Key"):
            # HMAC vectors contain a key attribute. Hash vectors do not.
            key = line.split(" = ")[1].encode("ascii")
        elif line.startswith("Msg"):
            # In the NIST vectors they have chosen to represent an empty
            # string as hex 00, which is of course not actually an empty
            # string. So we parse the provided length and catch this edge case.
            msg = line.split(" = ")[1].encode("ascii") if length > 0 else b""
        elif line.startswith("MD"):
            md = line.split(" = ")[1]
            # after MD is found the Msg+MD (+ potential key) tuple is complete
            if key is not None:
                vectors.append(KeyedHashVector(msg, md, key))
                key = None
                msg = None
                md = None
            else:
                vectors.append(HashVector(msg, md))
                msg = None
                md = None
        else:
            raise ValueError("Unknown line in hash vector")
    return vectors
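

# Illustrative sketch only (not used by the test suite): load_hash_vectors
# returns HashVector tuples for plain digest vectors and KeyedHashVector
# tuples when a "Key" line is present (HMAC-style vectors). The sample below
# is hand-written; note that "Len = 0" yields an empty message even though
# the Msg line contains "00".
def _example_load_hash_vectors():
    sample = [
        "[L = 20]",
        "Len = 0",
        "Msg = 00",
        "MD = da39a3ee5e6b4b0d3255bfef95601890afd80709",
    ]
    # Returns: [HashVector(message=b"", digest="da39...")]
    return load_hash_vectors(sample)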


def load_pkcs1_vectors(vector_data):
    """
    Loads data out of RSA PKCS #1 vector files.
    """
    private_key_vector = None
    public_key_vector = None
    attr = None
    key = None
    example_vector = None
    examples = []
    vectors = []
    for line in vector_data:
        if (
            line.startswith("# PSS Example") or
            line.startswith("# OAEP Example") or
            line.startswith("# PKCS#1 v1.5")
        ):
            if example_vector:
                for key, value in six.iteritems(example_vector):
                    hex_str = "".join(value).replace(" ", "").encode("ascii")
                    example_vector[key] = hex_str
                examples.append(example_vector)

            attr = None
            example_vector = collections.defaultdict(list)

        if line.startswith("# Message"):
            attr = "message"
            continue
        elif line.startswith("# Salt"):
            attr = "salt"
            continue
        elif line.startswith("# Seed"):
            attr = "seed"
            continue
        elif line.startswith("# Signature"):
            attr = "signature"
            continue
        elif line.startswith("# Encryption"):
            attr = "encryption"
            continue
        elif (
            example_vector and
            line.startswith("# =============================================")
        ):
            for key, value in six.iteritems(example_vector):
                hex_str = "".join(value).replace(" ", "").encode("ascii")
                example_vector[key] = hex_str
            examples.append(example_vector)
            example_vector = None
            attr = None
        elif example_vector and line.startswith("#"):
            continue
        else:
            if attr is not None and example_vector is not None:
                example_vector[attr].append(line.strip())
            continue

        if (
            line.startswith("# Example") or
            line.startswith("# =============================================")
        ):
            if key:
                assert private_key_vector
                assert public_key_vector

                for key, value in six.iteritems(public_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    public_key_vector[key] = int(hex_str, 16)

                for key, value in six.iteritems(private_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    private_key_vector[key] = int(hex_str, 16)

                private_key_vector["examples"] = examples
                examples = []

                assert (
                    private_key_vector['public_exponent'] ==
                    public_key_vector['public_exponent']
                )

                assert (
                    private_key_vector['modulus'] ==
                    public_key_vector['modulus']
                )

                vectors.append(
                    (private_key_vector, public_key_vector)
                )

            public_key_vector = collections.defaultdict(list)
            private_key_vector = collections.defaultdict(list)
            key = None
            attr = None

        if private_key_vector is None or public_key_vector is None:
            # Random garbage to defeat CPython's peephole optimizer so that
            # coverage records correctly: https://bugs.python.org/issue2506
            1 + 1
            continue

        if line.startswith("# Private key"):
            key = private_key_vector
        elif line.startswith("# Public key"):
            key = public_key_vector
        elif line.startswith("# Modulus:"):
            attr = "modulus"
        elif line.startswith("# Public exponent:"):
            attr = "public_exponent"
        elif line.startswith("# Exponent:"):
            if key is public_key_vector:
                attr = "public_exponent"
            else:
                assert key is private_key_vector
                attr = "private_exponent"
        elif line.startswith("# Prime 1:"):
            attr = "p"
        elif line.startswith("# Prime 2:"):
            attr = "q"
        elif line.startswith("# Prime exponent 1:"):
            attr = "dmp1"
        elif line.startswith("# Prime exponent 2:"):
            attr = "dmq1"
        elif line.startswith("# Coefficient:"):
            attr = "iqmp"
        elif line.startswith("#"):
            attr = None
        else:
            if key is not None and attr is not None:
                key[attr].append(line.strip())
    return vectors


def load_rsa_nist_vectors(vector_data):
    test_data = None
    p = None
    salt_length = None
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and section headers are ignored
        if not line or line.startswith("["):
            continue

        if line.startswith("# Salt len:"):
            salt_length = int(line.split(":")[1].strip())
            continue
        elif line.startswith("#"):
            continue

        # Build our data using a simple Key = Value format
        name, value = [c.strip() for c in line.split("=")]
        if name == "n":
            n = int(value, 16)
        elif name == "e" and p is None:
            e = int(value, 16)
        elif name == "p":
            p = int(value, 16)
        elif name == "q":
            q = int(value, 16)
        elif name == "SHAAlg":
            if p is None:
                test_data = {
                    "modulus": n,
                    "public_exponent": e,
                    "salt_length": salt_length,
                    "algorithm": value,
                    "fail": False
                }
            else:
                test_data = {
                    "modulus": n,
                    "p": p,
                    "q": q,
                    "algorithm": value
                }
                if salt_length is not None:
                    test_data["salt_length"] = salt_length
            data.append(test_data)
        elif name == "e" and p is not None:
            test_data["public_exponent"] = int(value, 16)
        elif name == "d":
            test_data["private_exponent"] = int(value, 16)
        elif name == "Result":
            test_data["fail"] = value.startswith("F")
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data


def load_fips_dsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS DSA KeyPair vector files.
    """
    vectors = []
    # When reading_key_data is set to True it tells the loader to continue
    # constructing dictionaries. We set reading_key_data to False during the
    # blocks of the vectors of N=224 because we don't support it.
    reading_key_data = True
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue
        elif line.startswith("[mod = L=1024"):
            continue
        elif line.startswith("[mod = L=2048, N=224"):
            reading_key_data = False
            continue
        elif line.startswith("[mod = L=2048, N=256"):
            reading_key_data = True
            continue
        elif line.startswith("[mod = L=3072"):
            continue

        if reading_key_data:
            if line.startswith("P"):
                vectors.append({'p': int(line.split("=")[1], 16)})
            elif line.startswith("Q"):
                vectors[-1]['q'] = int(line.split("=")[1], 16)
            elif line.startswith("G"):
                vectors[-1]['g'] = int(line.split("=")[1], 16)
            elif line.startswith("X") and 'x' not in vectors[-1]:
                vectors[-1]['x'] = int(line.split("=")[1], 16)
            elif line.startswith("X") and 'x' in vectors[-1]:
                vectors.append({'p': vectors[-1]['p'],
                                'q': vectors[-1]['q'],
                                'g': vectors[-1]['g'],
                                'x': int(line.split("=")[1], 16)
                                })
            elif line.startswith("Y"):
                vectors[-1]['y'] = int(line.split("=")[1], 16)

    return vectors
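

# Illustrative sketch only (not used by the test suite): a toy input for
# load_fips_dsa_key_pair_vectors. The values are not real DSA parameters;
# they only show that P starts a new dictionary and that a second X line
# reuses the current p/q/g for another key pair.
def _example_load_fips_dsa_key_pair_vectors():
    sample = [
        "[mod = L=2048, N=256, SHA-256]",
        "P = 0f",
        "Q = 0b",
        "G = 02",
        "X = 03",
        "Y = 05",
        "X = 07",
        "Y = 09",
    ]
    # Returns: [{'p': 15, 'q': 11, 'g': 2, 'x': 3, 'y': 5},
    #           {'p': 15, 'q': 11, 'g': 2, 'x': 7, 'y': 9}]
    return load_fips_dsa_key_pair_vectors(sample)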


def load_fips_dsa_sig_vectors(vector_data):
    """
    Loads data out of the FIPS DSA SigVer vector files.
    """
    vectors = []
    sha_regex = re.compile(
        r"\[mod = L=...., N=..., SHA-(?P<sha>1|224|256|384|512)\]"
    )
    # When reading_key_data is set to True it tells the loader to continue
    # constructing dictionaries. We set reading_key_data to False during the
    # blocks of the vectors of N=224 because we don't support it.
    reading_key_data = True

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        sha_match = sha_regex.match(line)
        if sha_match:
            digest_algorithm = "SHA-{0}".format(sha_match.group("sha"))

        if line.startswith("[mod = L=2048, N=224"):
            reading_key_data = False
            continue
        elif line.startswith("[mod = L=2048, N=256"):
            reading_key_data = True
            continue

        if not reading_key_data or line.startswith("[mod"):
            continue

        name, value = [c.strip() for c in line.split("=")]

        if name == "P":
            vectors.append({'p': int(value, 16),
                            'digest_algorithm': digest_algorithm})
        elif name == "Q":
            vectors[-1]['q'] = int(value, 16)
        elif name == "G":
            vectors[-1]['g'] = int(value, 16)
        elif name == "Msg" and 'msg' not in vectors[-1]:
            hexmsg = value.strip().encode("ascii")
            vectors[-1]['msg'] = binascii.unhexlify(hexmsg)
        elif name == "Msg" and 'msg' in vectors[-1]:
            hexmsg = value.strip().encode("ascii")
            vectors.append({'p': vectors[-1]['p'],
                            'q': vectors[-1]['q'],
                            'g': vectors[-1]['g'],
                            'digest_algorithm':
                            vectors[-1]['digest_algorithm'],
                            'msg': binascii.unhexlify(hexmsg)})
        elif name == "X":
            vectors[-1]['x'] = int(value, 16)
        elif name == "Y":
            vectors[-1]['y'] = int(value, 16)
        elif name == "R":
            vectors[-1]['r'] = int(value, 16)
        elif name == "S":
            vectors[-1]['s'] = int(value, 16)
        elif name == "Result":
            vectors[-1]['result'] = value.split("(")[0].strip()

    return vectors


# http://tools.ietf.org/html/rfc4492#appendix-A
_ECDSA_CURVE_NAMES = {
    "P-192": "secp192r1",
    "P-224": "secp224r1",
    "P-256": "secp256r1",
    "P-384": "secp384r1",
    "P-521": "secp521r1",
    "K-163": "sect163k1",
    "K-233": "sect233k1",
    "K-256": "secp256k1",
    "K-283": "sect283k1",
    "K-409": "sect409k1",
    "K-571": "sect571k1",
    "B-163": "sect163r2",
    "B-233": "sect233r1",
    "B-283": "sect283r1",
    "B-409": "sect409r1",
    "B-571": "sect571r1",
}


def load_fips_ecdsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA KeyPair vector files.
    """
    vectors = []
    key_data = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line[1:-1] in _ECDSA_CURVE_NAMES:
            curve_name = _ECDSA_CURVE_NAMES[line[1:-1]]

        elif line.startswith("d = "):
            if key_data is not None:
                vectors.append(key_data)

            key_data = {
                "curve": curve_name,
                "d": int(line.split("=")[1], 16)
            }

        elif key_data is not None:
            if line.startswith("Qx = "):
                key_data["x"] = int(line.split("=")[1], 16)
            elif line.startswith("Qy = "):
                key_data["y"] = int(line.split("=")[1], 16)

    assert key_data is not None
    vectors.append(key_data)

    return vectors


def load_fips_ecdsa_signing_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA SigGen vector files.
    """
    vectors = []
    curve_rx = re.compile(
        r"\[(?P<curve>[PKB]-[0-9]{3}),SHA-(?P<sha>1|224|256|384|512)\]"
    )

    data = None
    for line in vector_data:
        line = line.strip()

        curve_match = curve_rx.match(line)
        if curve_match:
            curve_name = _ECDSA_CURVE_NAMES[curve_match.group("curve")]
            digest_name = "SHA-{0}".format(curve_match.group("sha"))

        elif line.startswith("Msg = "):
            if data is not None:
                vectors.append(data)

            hexmsg = line.split("=")[1].strip().encode("ascii")

            data = {
                "curve": curve_name,
                "digest_algorithm": digest_name,
                "message": binascii.unhexlify(hexmsg)
            }

        elif data is not None:
            if line.startswith("Qx = "):
                data["x"] = int(line.split("=")[1], 16)
            elif line.startswith("Qy = "):
                data["y"] = int(line.split("=")[1], 16)
            elif line.startswith("R = "):
                data["r"] = int(line.split("=")[1], 16)
            elif line.startswith("S = "):
                data["s"] = int(line.split("=")[1], 16)
            elif line.startswith("d = "):
                data["d"] = int(line.split("=")[1], 16)
            elif line.startswith("Result = "):
                data["fail"] = line.split("=")[1].strip()[0] == "F"

    assert data is not None
    vectors.append(data)
    return vectors


def load_kasvs_dh_vectors(vector_data):
    """
    Loads data out of the KASVS key exchange vector data
    """
    result_rx = re.compile(r"([FP]) \(([0-9]+) -")

    vectors = []
    data = {
        "fail_z": False,
        "fail_agree": False
    }

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("P = "):
            data["p"] = int(line.split("=")[1], 16)
        elif line.startswith("Q = "):
            data["q"] = int(line.split("=")[1], 16)
        elif line.startswith("G = "):
            data["g"] = int(line.split("=")[1], 16)
        elif line.startswith("Z = "):
            z_hex = line.split("=")[1].strip().encode("ascii")
            data["z"] = binascii.unhexlify(z_hex)
        elif line.startswith("XstatCAVS = "):
            data["x1"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatCAVS = "):
            data["y1"] = int(line.split("=")[1], 16)
        elif line.startswith("XstatIUT = "):
            data["x2"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatIUT = "):
            data["y2"] = int(line.split("=")[1], 16)
        elif line.startswith("Result = "):
            result_str = line.split("=")[1].strip()
            match = result_rx.match(result_str)

            if match.group(1) == "F":
                if int(match.group(2)) in (5, 10):
                    data["fail_z"] = True
                else:
                    data["fail_agree"] = True

            vectors.append(data)

            data = {
                "p": data["p"],
                "q": data["q"],
                "g": data["g"],
                "fail_z": False,
                "fail_agree": False
            }

    return vectors
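

# Note on load_kasvs_ecdh_vectors below: it reads vector_data in three
# sequential passes (file header, parameter-set metadata, then the test
# data), each loop picking up where the previous one broke off. That only
# works because load_vectors_from_file hands it a file object, i.e. a
# single-pass iterator; a plain list would be re-scanned from the top on
# every pass.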


def load_kasvs_ecdh_vectors(vector_data):
    """
    Loads data out of the KASVS key exchange vector data
    """
    curve_name_map = {
        "P-192": "secp192r1",
        "P-224": "secp224r1",
        "P-256": "secp256r1",
        "P-384": "secp384r1",
        "P-521": "secp521r1",
    }

    result_rx = re.compile(r"([FP]) \(([0-9]+) -")

    tags = []
    sets = {}
    vectors = []

    # find info in header
    for line in vector_data:
        line = line.strip()

        if line.startswith("#"):
            parm = line.split("Parameter set(s) supported:")
            if len(parm) == 2:
                names = parm[1].strip().split()
                for n in names:
                    tags.append("[%s]" % n)
                break

    # Sets Metadata
    tag = None
    curve = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line in tags:
            tag = line
            curve = None
        elif line.startswith("[Curve selected:"):
            curve = curve_name_map[line.split(':')[1].strip()[:-1]]

        if tag is not None and curve is not None:
            sets[tag.strip("[]")] = curve
            tag = None

        if len(tags) == len(sets):
            break

    # Data
    data = {
        "CAVS": {},
        "IUT": {},
    }
    tag = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("["):
            tag = line.split()[0][1:]
        elif line.startswith("COUNT = "):
            data["COUNT"] = int(line.split("=")[1])
        elif line.startswith("dsCAVS = "):
            data["CAVS"]["d"] = int(line.split("=")[1], 16)
        elif line.startswith("QsCAVSx = "):
            data["CAVS"]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("QsCAVSy = "):
            data["CAVS"]["y"] = int(line.split("=")[1], 16)
        elif line.startswith("dsIUT = "):
            data["IUT"]["d"] = int(line.split("=")[1], 16)
        elif line.startswith("QsIUTx = "):
            data["IUT"]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("QsIUTy = "):
            data["IUT"]["y"] = int(line.split("=")[1], 16)
        elif line.startswith("OI = "):
            data["OI"] = int(line.split("=")[1], 16)
        elif line.startswith("Z = "):
            data["Z"] = int(line.split("=")[1], 16)
        elif line.startswith("DKM = "):
            data["DKM"] = int(line.split("=")[1], 16)
        elif line.startswith("Result = "):
            result_str = line.split("=")[1].strip()
            match = result_rx.match(result_str)

            if match.group(1) == "F":
                data["fail"] = True
            else:
                data["fail"] = False
            data["errno"] = int(match.group(2))

            data["curve"] = sets[tag]

            vectors.append(data)

            data = {
                "CAVS": {},
                "IUT": {},
            }

    return vectors


def load_x963_vectors(vector_data):
    """
    Loads data out of the X9.63 vector data
    """
    vectors = []

    # Sets Metadata
    hashname = None
    vector = {}
    for line in vector_data:
        line = line.strip()

        if line.startswith("[SHA"):
            hashname = line[1:-1]
            shared_secret_len = 0
            shared_info_len = 0
            key_data_len = 0
        elif line.startswith("[shared secret length"):
            shared_secret_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("[SharedInfo length"):
            shared_info_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("[key data length"):
            key_data_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("COUNT"):
            count = int(line.split("=")[1].strip())
            vector["hash"] = hashname
            vector["count"] = count
            vector["shared_secret_length"] = shared_secret_len
            vector["sharedinfo_length"] = shared_info_len
            vector["key_data_length"] = key_data_len
        elif line.startswith("Z"):
            vector["Z"] = line.split("=")[1].strip()
            assert math.ceil(shared_secret_len / 8) * 2 == len(vector["Z"])
        elif line.startswith("SharedInfo"):
            if shared_info_len != 0:
                vector["sharedinfo"] = line.split("=")[1].strip()
                silen = len(vector["sharedinfo"])
                assert math.ceil(shared_info_len / 8) * 2 == silen
        elif line.startswith("key_data"):
            vector["key_data"] = line.split("=")[1].strip()
            assert math.ceil(key_data_len / 8) * 2 == len(vector["key_data"])
            vectors.append(vector)
            vector = {}

    return vectors


def load_nist_kbkdf_vectors(vector_data):
    """
    Load NIST SP 800-108 KDF Vectors
    """
    vectors = []
    test_data = None
    tag = {}

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("[") and line.endswith("]"):
            tag_data = line[1:-1]
            name, value = [c.strip() for c in tag_data.split("=")]
            if value.endswith('_BITS'):
                value = int(value.split('_')[0])
                tag.update({name.lower(): value})
                continue

            tag.update({name.lower(): value.lower()})
        elif line.startswith("COUNT="):
            test_data = dict()
            test_data.update(tag)
            vectors.append(test_data)
        elif line.startswith("L"):
            name, value = [c.strip() for c in line.split("=")]
            test_data[name.lower()] = int(value)
        else:
            name, value = [c.strip() for c in line.split("=")]
            test_data[name.lower()] = value.encode("ascii")

    return vectors
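

# Illustrative sketch only (not used by the test suite): a hand-written
# sample of the SP 800-108 KBKDF format. Bracketed header values are folded
# into every subsequent test dictionary; values ending in "_BITS" are
# converted to integers, and L is stored as an int as well.
def _example_load_nist_kbkdf_vectors():
    sample = [
        "[PRF=HMAC_SHA256]",
        "[CTRLOCATION=BEFORE_FIXED]",
        "[RLEN=8_BITS]",
        "COUNT=0",
        "L = 128",
        "KI = 00112233445566778899aabbccddeeff",
        "KO = 0a0b0c0d",
    ]
    # Returns: [{'prf': 'hmac_sha256', 'ctrlocation': 'before_fixed',
    #            'rlen': 8, 'l': 128, 'ki': b'0011...', 'ko': b'0a0b0c0d'}]
    return load_nist_kbkdf_vectors(sample)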


def load_ed25519_vectors(vector_data):
    data = []
    for line in vector_data:
        secret_key, public_key, message, signature, _ = line.split(':')
        # In the vectors the first element is secret key + public key
        secret_key = secret_key[0:64]
        # In the vectors the signature section is signature + message
        signature = signature[0:128]
        data.append({
            "secret_key": secret_key,
            "public_key": public_key,
            "message": message,
            "signature": signature
        })
    return data
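

# Illustrative sketch only (not used by the test suite): the Ed25519 vectors
# are colon-separated lines in the style of the reference implementation's
# sign.input file: secret key concatenated with the public key, the public
# key, the message, the signature concatenated with the message, and a
# trailing colon. The hex below is made up.
def _example_load_ed25519_vectors():
    line = ":".join([
        "9d" * 32 + "3b" * 32,   # 64 hex chars secret key + 64 public key
        "3b" * 32,               # public key
        "deadbeef",              # message
        "aa" * 64 + "deadbeef",  # 128 hex chars signature + message
        "",
    ])
    # Returns: [{"secret_key": "9d" * 32, "public_key": "3b" * 32,
    #            "message": "deadbeef", "signature": "aa" * 64}]
    return load_ed25519_vectors([line])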


def load_nist_ccm_vectors(vector_data):
    test_data = None
    section_data = None
    global_data = {}
    new_section = False
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments should be ignored
        if not line or line.startswith("#"):
            continue

        # Some of the CCM vectors have global values for this. They are always
        # at the top before the first section header (see: VADT, VNT, VPT)
        if line.startswith(("Alen", "Plen", "Nlen", "Tlen")):
            name, value = [c.strip() for c in line.split("=")]
            global_data[name.lower()] = int(value)
            continue

        # section headers contain length data we might care about
        if line.startswith("["):
            new_section = True
            section_data = {}
            section = line[1:-1]
            items = [c.strip() for c in section.split(",")]
            for item in items:
                name, value = [c.strip() for c in item.split("=")]
                section_data[name.lower()] = int(value)
            continue

        name, value = [c.strip() for c in line.split("=")]

        if name.lower() in ("key", "nonce") and new_section:
            section_data[name.lower()] = value.encode("ascii")
            continue

        new_section = False

        # Payload is sometimes special because these vectors are absurd. Each
        # example may or may not have a payload. If it does not then the
        # previous example's payload should be used. We accomplish this by
        # writing it into the section_data. Because we update each example
        # with the section data it will be overwritten if a new payload value
        # is present. NIST should be ashamed of their vector creation.
        if name.lower() == "payload":
            section_data[name.lower()] = value.encode("ascii")

        # Result is a special token telling us if the test should pass/fail.
        # This is only present in the DVPT CCM tests
        if name.lower() == "result":
            if value.lower() == "pass":
                test_data["fail"] = False
            else:
                test_data["fail"] = True
            continue
        # COUNT is a special token that indicates a new block of data
        if name.lower() == "count":
            test_data = {}
            test_data.update(global_data)
            test_data.update(section_data)
            data.append(test_data)
            continue
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data
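

# Illustrative sketch only (not used by the test suite): a hand-written CCM
# sample showing how global lengths (Alen/Plen/Nlen/Tlen), section headers,
# and the per-section Key/Nonce values are folded into each COUNT's
# dictionary. The hex values are made up.
def _example_load_nist_ccm_vectors():
    sample = [
        "Plen = 24",
        "[Alen = 0, Nlen = 7, Tlen = 4]",
        "Key = c0c1c2c3c4c5c6c7c8c9cacbcccdcecf",
        "Nonce = 00000003020100a0a1a2a3",
        "Count = 0",
        "Adata = 00",
        "Payload = 08090a0b0c0d0e0f1011121314151617",
        "CT = 588c979a61c663d2f066d0c2c0f989",
    ]
    # Returns a single dict combining plen, alen/nlen/tlen, key, nonce,
    # adata, payload and ct (all lowercased keys).
    return load_nist_ccm_vectors(sample)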