mirror of
https://github.com/Mbed-TLS/mbedtls.git
synced 2026-03-20 11:11:08 +01:00
Move some scripts from mbedtls into the framework
Move a bunch of files from `scripts` and `mbedtls/scripts` to the framework. Most are not called from any scripts invoked by the CI, but a couple are. A subsequent commit will adapt the scripts. None of these scripts are referenced from other repositories except in documentation. The following files will be removed, and added to `mbedtls-framework`: * `scripts/ecp_comb_table.py` * `scripts/massif_max.pl` * `tests/scripts/audit-validity-dates.py` (moved to `scripts/`) * `tests/scripts/gen_ctr_drbg.pl` (moved to `scripts/`) * `tests/scripts/gen_gcm_decrypt.pl` (moved to `scripts/`) * `tests/scripts/gen_gcm_encrypt.pl` (moved to `scripts/`) * `tests/scripts/gen_pkcs1_v21_sign_verify.pl` (moved to `scripts/`) * `tests/scripts/generate-afl-tests.sh` (moved to `scripts/`) * `tests/scripts/generate_server9_bad_saltlen.py` (moved to `scripts/`) * `tests/scripts/run-metatests.sh` (moved to `scripts/`) * `tests/scripts/run_demos.py` (moved to `scripts/`) * `tests/scripts/test_config_script.py` (moved to `scripts/`) Signed-off-by: Gilles Peskine <Gilles.Peskine@arm.com>
This commit is contained in:
@@ -1,237 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Purpose
|
||||
|
||||
This script dumps comb table of ec curve. When you add a new ec curve, you
|
||||
can use this script to generate codes to define `<curve>_T` in ecp_curves.c
|
||||
"""
|
||||
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
# Step-by-step instructions printed by usage(); shown verbatim to the user.
HOW_TO_ADD_NEW_CURVE = """
If you are trying to add new curve, you can follow these steps:

1. Define curve parameters (<curve>_p, <curve>_gx, etc...) in ecp_curves.c.
2. Add a macro to define <curve>_T to NULL following these parameters.
3. Build mbedcrypto
4. Run this script with an argument of new curve
5. Copy the output of this script into ecp_curves.c and replace the macro added
in Step 2
6. Rebuild and test if everything is ok

Replace the <curve> in the above with the name of the curve you want to add."""

# C compiler used to build the helper programs; override via the CC
# environment variable.
CC = os.getenv('CC', 'cc')
# Directory containing the compiled mbedcrypto library; override via the
# MBEDTLS_LIBRARY_PATH environment variable.
MBEDTLS_LIBRARY_PATH = os.getenv('MBEDTLS_LIBRARY_PATH', "library")
SRC_DUMP_COMB_TABLE = r'''
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include "mbedtls/ecp.h"
|
||||
#include "mbedtls/error.h"
|
||||
|
||||
static void dump_mpi_initialize( const char *name, const mbedtls_mpi *d )
|
||||
{
|
||||
uint8_t buf[128] = {0};
|
||||
size_t olen;
|
||||
uint8_t *p;
|
||||
|
||||
olen = mbedtls_mpi_size( d );
|
||||
mbedtls_mpi_write_binary_le( d, buf, olen );
|
||||
printf("static const mbedtls_mpi_uint %s[] = {\n", name);
|
||||
for (p = buf; p < buf + olen; p += 8) {
|
||||
printf( " BYTES_TO_T_UINT_8( 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X ),\n",
|
||||
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7] );
|
||||
}
|
||||
printf("};\n");
|
||||
}
|
||||
|
||||
static void dump_T( const mbedtls_ecp_group *grp )
|
||||
{
|
||||
char name[128];
|
||||
|
||||
printf( "#if MBEDTLS_ECP_FIXED_POINT_OPTIM == 1\n" );
|
||||
|
||||
for (size_t i = 0; i < grp->T_size; ++i) {
|
||||
snprintf( name, sizeof(name), "%s_T_%zu_X", CURVE_NAME, i );
|
||||
dump_mpi_initialize( name, &grp->T[i].X );
|
||||
|
||||
snprintf( name, sizeof(name), "%s_T_%zu_Y", CURVE_NAME, i );
|
||||
dump_mpi_initialize( name, &grp->T[i].Y );
|
||||
}
|
||||
printf( "static const mbedtls_ecp_point %s_T[%zu] = {\n", CURVE_NAME, grp->T_size );
|
||||
size_t olen;
|
||||
for (size_t i = 0; i < grp->T_size; ++i) {
|
||||
int z;
|
||||
if ( mbedtls_mpi_cmp_int(&grp->T[i].Z, 0) == 0 ) {
|
||||
z = 0;
|
||||
} else if ( mbedtls_mpi_cmp_int(&grp->T[i].Z, 1) == 0 ) {
|
||||
z = 1;
|
||||
} else {
|
||||
fprintf( stderr, "Unexpected value of Z (i = %d)\n", (int)i );
|
||||
exit( 1 );
|
||||
}
|
||||
printf( " ECP_POINT_INIT_XY_Z%d(%s_T_%zu_X, %s_T_%zu_Y),\n",
|
||||
z,
|
||||
CURVE_NAME, i,
|
||||
CURVE_NAME, i
|
||||
);
|
||||
}
|
||||
printf("};\n#endif\n\n");
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
int rc;
|
||||
mbedtls_mpi m;
|
||||
mbedtls_ecp_point R;
|
||||
mbedtls_ecp_group grp;
|
||||
|
||||
mbedtls_ecp_group_init( &grp );
|
||||
rc = mbedtls_ecp_group_load( &grp, CURVE_ID );
|
||||
if (rc != 0) {
|
||||
char buf[100];
|
||||
mbedtls_strerror( rc, buf, sizeof(buf) );
|
||||
fprintf( stderr, "mbedtls_ecp_group_load: %s (-0x%x)\n", buf, -rc );
|
||||
return 1;
|
||||
}
|
||||
grp.T = NULL;
|
||||
mbedtls_ecp_point_init( &R );
|
||||
mbedtls_mpi_init( &m);
|
||||
mbedtls_mpi_lset( &m, 1 );
|
||||
rc = mbedtls_ecp_mul( &grp, &R, &m, &grp.G, NULL, NULL );
|
||||
if ( rc != 0 ) {
|
||||
char buf[100];
|
||||
mbedtls_strerror( rc, buf, sizeof(buf) );
|
||||
fprintf( stderr, "mbedtls_ecp_mul: %s (-0x%x)\n", buf, -rc );
|
||||
return 1;
|
||||
}
|
||||
if ( grp.T == NULL ) {
|
||||
fprintf( stderr, "grp.T is not generated. Please make sure"
|
||||
"MBEDTLS_ECP_FIXED_POINT_OPTIM is enabled in mbedtls_config.h\n" );
|
||||
return 1;
|
||||
}
|
||||
dump_T( &grp );
|
||||
return 0;
|
||||
}
|
||||
'''
|
||||
|
||||
SRC_DUMP_KNOWN_CURVE = r'''
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include "mbedtls/ecp.h"
|
||||
|
||||
int main() {
|
||||
const mbedtls_ecp_curve_info *info = mbedtls_ecp_curve_list();
|
||||
mbedtls_ecp_group grp;
|
||||
|
||||
mbedtls_ecp_group_init( &grp );
|
||||
while ( info->name != NULL ) {
|
||||
mbedtls_ecp_group_load( &grp, info->grp_id );
|
||||
if ( mbedtls_ecp_get_type(&grp) == MBEDTLS_ECP_TYPE_SHORT_WEIERSTRASS ) {
|
||||
printf( " %s", info->name );
|
||||
}
|
||||
info++;
|
||||
}
|
||||
printf( "\n" );
|
||||
return 0;
|
||||
}
|
||||
'''
|
||||
|
||||
|
||||
def join_src_path(*args):
    """Return a normalized path rooted at the parent of this script's directory."""
    script_dir = os.path.dirname(__file__)
    pieces = (script_dir, "..") + args
    return os.path.normpath(os.path.join(*pieces))
||||
|
||||
|
||||
def run_c_source(src, cflags):
    """
    Compile and run a piece of C source code.

    :param src: the C language code to run
    :param cflags: additional cflags passed to the compiler
    :return: True if compiling and running both succeeded, False otherwise.
    """
    # Work inside a private temporary directory. The previous implementation
    # used tempfile.mktemp() (deprecated and race-prone) and leaked both the
    # source file and the binary whenever compilation or execution failed.
    # TemporaryDirectory is created with mode 0700 and is always cleaned up.
    with tempfile.TemporaryDirectory(prefix="mbedtls") as tmpdir:
        binname = os.path.join(tmpdir, "dump_comb_table")
        srcname = os.path.join(tmpdir, "dump_comb_table.c")
        with open(srcname, mode="w") as srcfile:
            srcfile.write(src)

        args = [CC,
                *cflags,
                '-I' + join_src_path("include"),
                "-o", binname,
                '-L' + MBEDTLS_LIBRARY_PATH,
                srcname,
                '-lmbedcrypto']

        p = subprocess.run(args=args, check=False)
        if p.returncode != 0:
            return False

        # Extend, rather than replace, the environment so the child keeps
        # PATH and other variables it may need; only LD_LIBRARY_PATH is
        # overridden so the freshly built library is found at run time.
        env = dict(os.environ, LD_LIBRARY_PATH=MBEDTLS_LIBRARY_PATH)
        p = subprocess.run(args=[binname], check=False, env=env)
        return p.returncode == 0
||||
|
||||
|
||||
def compute_curve(curve):
    """Compile and run the dumper for one curve; exit the script on failure."""
    cflags = [
        '-g',
        '-DCURVE_ID=MBEDTLS_ECP_DP_%s' % curve.upper(),
        '-DCURVE_NAME="%s"' % curve.lower(),
    ]
    ok = run_c_source(SRC_DUMP_COMB_TABLE, cflags)
    if not ok:
        print("""\
Unable to compile and run utility.""", file=sys.stderr)
        sys.exit(1)
||||
|
||||
|
||||
def usage():
    """Print command-line help, including the list of supported curves."""
    header = """
Usage: python %s <curve>...

Arguments:
curve Specify one or more curve names (e.g secp256r1)

All possible curves: """ % sys.argv[0]
    print(header)
    # The list of curves is produced by a small compiled helper program.
    run_c_source(SRC_DUMP_KNOWN_CURVE, [])
    footer = """
Environment Variable:
CC Specify which c compile to use to compile utility.
MBEDTLS_LIBRARY_PATH
Specify the path to mbedcrypto library. (e.g. build/library/)

How to add a new curve: %s""" % HOW_TO_ADD_NEW_CURVE
    print(footer)
||||
|
||||
|
||||
def run_main():
    """Entry point: warn if no local library is found, then dump each requested curve."""
    shared_lib_path = os.path.normpath(
        os.path.join(MBEDTLS_LIBRARY_PATH, "libmbedcrypto.so"))
    static_lib_path = os.path.normpath(
        os.path.join(MBEDTLS_LIBRARY_PATH, "libmbedcrypto.a"))
    have_local_lib = (os.path.exists(shared_lib_path)
                      or os.path.exists(static_lib_path))
    if not have_local_lib:
        # Non-fatal: the system-wide library may still work, but the user
        # probably wanted the one built from this source tree.
        print("Warning: both '%s' and '%s' are not exists. This script will use "
              "the library from your system instead of the library compiled by "
              "this source directory.\n"
              "You can specify library path using environment variable "
              "'MBEDTLS_LIBRARY_PATH'." % (shared_lib_path, static_lib_path),
              file=sys.stderr)

    curves = sys.argv[1:]
    if not curves:
        usage()
    else:
        for curve in curves:
            compute_curve(curve)
||||
|
||||
|
||||
# Allow the file to be imported without side effects; only run when executed
# directly.
if __name__ == '__main__':
    run_main()
|
||||
@@ -1,36 +0,0 @@
|
||||
#!/usr/bin/env perl

# Parse a massif.out.xxx file and output peak total memory usage
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use warnings;
use strict;

use utf8;
use open qw(:std utf8);

# Exactly one argument: the massif output file to scan.
die unless @ARGV == 1;

my @snaps;
open my $fh, '<', $ARGV[0] or die;
# Slurp the file one snapshot at a time by (locally) setting the input
# record separator to the string that starts each massif snapshot.
{ local $/ = 'snapshot='; @snaps = <$fh>; }
close $fh or die;

# Track the snapshot with the largest heap+heap_extra+stack total.
my ($max, $max_heap, $max_he, $max_stack) = (0, 0, 0, 0);
for (@snaps)
{
    # Pull the three byte counts out of the snapshot record; /x allows
    # the pattern to be laid out one field per line.
    my ($heap, $heap_extra, $stack) = m{
        mem_heap_B=(\d+)\n
        mem_heap_extra_B=(\d+)\n
        mem_stacks_B=(\d+)
    }xm;
    # Records without the expected fields (e.g. the file header) are skipped.
    next unless defined $heap;
    my $total = $heap + $heap_extra + $stack;
    if( $total > $max ) {
        ($max, $max_heap, $max_he, $max_stack) = ($total, $heap, $heap_extra, $stack);
    }
}

printf "$max (heap $max_heap+$max_he, stack $max_stack)\n";
|
||||
@@ -1,469 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
"""Audit validity date of X509 crt/crl/csr.
|
||||
|
||||
This script is used to audit the validity date of crt/crl/csr used for testing.
|
||||
It prints the information about X.509 objects excluding the objects that
|
||||
are valid throughout the desired validity period. The data are collected
|
||||
from framework/data_files/ and tests/suites/*.data files by default.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import typing
|
||||
import argparse
|
||||
import datetime
|
||||
import glob
|
||||
import logging
|
||||
import hashlib
|
||||
from enum import Enum
|
||||
|
||||
# The script requires cryptography >= 35.0.0 which is only available
|
||||
# for Python >= 3.6.
|
||||
import cryptography
|
||||
from cryptography import x509
|
||||
|
||||
from generate_test_code import FileWrapper
|
||||
|
||||
import scripts_path # pylint: disable=unused-import
|
||||
from mbedtls_framework import build_tree
|
||||
from mbedtls_framework import logging_util
|
||||
|
||||
def check_cryptography_version():
    """Fail early when the installed cryptography package is too old.

    This script needs cryptography >= 35.0.0; raise with a clear message
    instead of failing obscurely later.
    """
    match = re.match(r'^[0-9]+', cryptography.__version__)
    if match is None or int(match.group(0)) < 35:
        # Keep a space between the two halves of the message: the original
        # concatenation produced "...35.0.0(x.y is too old)".
        raise Exception("audit-validity-dates requires cryptography >= 35.0.0 "
                        "({} is too old)".format(cryptography.__version__))
||||
|
||||
class DataType(Enum):
    """Kind of X.509 object a piece of data represents."""
    CRT = 1 # Certificate
    CRL = 2 # Certificate Revocation List
    CSR = 3 # Certificate Signing Request
||||
|
||||
|
||||
class DataFormat(Enum):
    """Encoding format of an X.509 object on disk."""
    PEM = 1 # Privacy-Enhanced Mail
    DER = 2 # Distinguished Encoding Rules
||||
|
||||
|
||||
class AuditData:
    """Store data location, type and validity period of X.509 objects."""
    #pylint: disable=too-few-public-methods
    def __init__(self, data_type: DataType, x509_obj):
        """
        :param data_type: kind of object (certificate, CRL or CSR).
        :param x509_obj: parsed object from the ``cryptography`` package.
        """
        self.data_type = data_type
        # the locations that the x509 object could be found
        self.locations = [] # type: typing.List[str]
        self.fill_validity_duration(x509_obj)
        self._obj = x509_obj
        # SHA-1 over the DER encoding gives a run-to-run stable identifier
        # for deduplication; it is not used for any security purpose.
        encoding = cryptography.hazmat.primitives.serialization.Encoding.DER
        self._identifier = hashlib.sha1(self._obj.public_bytes(encoding)).hexdigest()

    @property
    def identifier(self):
        """
        Identifier of the underlying X.509 object, which is consistent across
        different runs.
        """
        return self._identifier

    def fill_validity_duration(self, x509_obj):
        """Read validity period from an X.509 object into
        ``self.not_valid_before`` / ``self.not_valid_after``."""
        # Certificate expires after "not_valid_after"
        # Certificate is invalid before "not_valid_before"
        if self.data_type == DataType.CRT:
            self.not_valid_after = x509_obj.not_valid_after
            self.not_valid_before = x509_obj.not_valid_before
        # CertificateRevocationList expires after "next_update"
        # CertificateRevocationList is invalid before "last_update"
        elif self.data_type == DataType.CRL:
            self.not_valid_after = x509_obj.next_update
            self.not_valid_before = x509_obj.last_update
        # CertificateSigningRequest is always valid, so use the widest
        # possible datetime range.
        elif self.data_type == DataType.CSR:
            self.not_valid_after = datetime.datetime.max
            self.not_valid_before = datetime.datetime.min
        else:
            raise ValueError("Unsupported file_type: {}".format(self.data_type))
||||
|
||||
|
||||
class X509Parser:
    """A parser class to parse crt/crl/csr file or data in PEM/DER format."""
    # A complete PEM block: -----BEGIN <type>----- ... -----END <type>-----
    PEM_REGEX = br'-{5}BEGIN (?P<type>.*?)-{5}(?P<data>.*?)-{5}END (?P=type)-{5}'
    # Only the opening tag line of a PEM block.
    PEM_TAG_REGEX = br'-{5}BEGIN (?P<type>.*?)-{5}\n'
    # PEM tag expected for each kind of object.
    PEM_TAGS = {
        DataType.CRT: 'CERTIFICATE',
        DataType.CRL: 'X509 CRL',
        DataType.CSR: 'CERTIFICATE REQUEST'
    }

    def __init__(self,
                 backends:
                 typing.Dict[DataType,
                             typing.Dict[DataFormat,
                                         typing.Callable[[bytes], object]]]) \
        -> None:
        """
        :param backends: for each DataType, the PEM and DER loader callables
            (e.g. the ``cryptography.x509.load_*`` functions).
        """
        self.backends = backends
        self.__generate_parsers()

    def __generate_parser(self, data_type: DataType):
        """Parser generator for a specific DataType"""
        tag = self.PEM_TAGS[data_type]
        pem_loader = self.backends[data_type][DataFormat.PEM]
        der_loader = self.backends[data_type][DataFormat.DER]
        def wrapper(data: bytes):
            """Try to parse *data* as this DataType; return None on mismatch."""
            pem_type = X509Parser.pem_data_type(data)
            # It is in PEM format with target tag
            if pem_type == tag:
                return pem_loader(data)
            # It is in PEM format without target tag
            if pem_type:
                return None
            # It might be in DER format
            try:
                result = der_loader(data)
            except ValueError:
                # Not valid DER for this type either.
                result = None
            return result
        wrapper.__name__ = "{}.parser[{}]".format(type(self).__name__, tag)
        return wrapper

    def __generate_parsers(self):
        """Generate parsers for all support DataType"""
        self.parsers = {}
        for data_type, _ in self.PEM_TAGS.items():
            self.parsers[data_type] = self.__generate_parser(data_type)

    def __getitem__(self, item):
        # Allow lookups like: parser[DataType.CRT](data)
        return self.parsers[item]

    @staticmethod
    def pem_data_type(data: bytes) -> typing.Optional[str]:
        """Get the tag from the data in PEM format

        :param data: data to be checked in binary mode.
        :return: PEM tag, or None when no tag detected.
        """
        m = re.search(X509Parser.PEM_TAG_REGEX, data)
        if m is not None:
            return m.group('type').decode('UTF-8')
        else:
            return None

    @staticmethod
    def check_hex_string(hex_str: str) -> bool:
        """Check if the hex string is possibly DER data."""
        hex_len = len(hex_str)
        # At least 6 hex char for 3 bytes: Type + Length + Content
        if hex_len < 6:
            return False
        # Check if Type (1 byte) is SEQUENCE.
        if hex_str[0:2] != '30':
            return False
        # Check LENGTH (1 byte) value
        content_len = int(hex_str[2:4], base=16)
        consumed = 4
        if content_len in (128, 255):
            # Indefinite or Reserved
            return False
        elif content_len > 127:
            # Definite, Long form: the first length byte gives the number of
            # subsequent length bytes.
            length_len = (content_len - 128) * 2
            content_len = int(hex_str[consumed:consumed+length_len], base=16)
            consumed += length_len
        # Check LENGTH: the whole string must be exactly header + content.
        if hex_len != content_len * 2 + consumed:
            return False
        return True
||||
|
||||
|
||||
class Auditor:
    """
    A base class that uses X509Parser to parse files to a list of AuditData.

    A subclass must implement the following methods:
    - collect_default_files: Return a list of file names that are used by
      default for parsing (auditing). The list will be stored in
      Auditor.default_files.
    - parse_file: Method that parses a single file to a list of AuditData.

    A subclass may override the following methods:
    - parse_bytes: By default, it parses `bytes` that contain only one valid
      piece of X.509 data (DER/PEM format) to an X.509 object.
    - walk_all: By default, it iterates over all the files in the provided
      file name list, calls `parse_file` for each file and stores the results
      by extending the `results` passed to the function.
    """
    def __init__(self, logger):
        """
        :param logger: logging.Logger used to report parse warnings.
        """
        self.logger = logger
        self.default_files = self.collect_default_files()
        # One loader per (DataType, DataFormat) combination, all from the
        # cryptography package.
        self.parser = X509Parser({
            DataType.CRT: {
                DataFormat.PEM: x509.load_pem_x509_certificate,
                DataFormat.DER: x509.load_der_x509_certificate
            },
            DataType.CRL: {
                DataFormat.PEM: x509.load_pem_x509_crl,
                DataFormat.DER: x509.load_der_x509_crl
            },
            DataType.CSR: {
                DataFormat.PEM: x509.load_pem_x509_csr,
                DataFormat.DER: x509.load_der_x509_csr
            },
        })

    def collect_default_files(self) -> typing.List[str]:
        """Collect the default files for parsing. Subclass responsibility."""
        raise NotImplementedError

    def parse_file(self, filename: str) -> typing.List[AuditData]:
        """
        Parse a list of AuditData from file. Subclass responsibility.

        :param filename: name of the file to parse.
        :return list of AuditData parsed from the file.
        """
        raise NotImplementedError

    def parse_bytes(self, data: bytes):
        """Parse an AuditData from bytes, trying each DataType in turn.

        :return: the first successfully parsed AuditData, or None.
        """
        for data_type in list(DataType):
            try:
                result = self.parser[data_type](data)
            except ValueError as val_error:
                result = None
                self.logger.warning(val_error)
            if result is not None:
                audit_data = AuditData(data_type, result)
                return audit_data
        return None

    def walk_all(self,
                 results: typing.Dict[str, AuditData],
                 file_list: typing.Optional[typing.List[str]] = None) \
        -> None:
        """
        Iterate over all the files in the list and get audit data. The
        results will be written to `results` passed to this function.

        :param results: The dictionary used to store the parsed
                        AuditData. The keys of this dictionary should
                        be the identifier of the AuditData.
        :param file_list: files to parse; defaults to self.default_files.
        """
        if file_list is None:
            file_list = self.default_files
        for filename in file_list:
            data_list = self.parse_file(filename)
            for d in data_list:
                # Deduplicate identical objects found in several places:
                # merge their location lists under one identifier.
                if d.identifier in results:
                    results[d.identifier].locations.extend(d.locations)
                else:
                    results[d.identifier] = d

    @staticmethod
    def find_test_dir():
        """Get the relative path for the Mbed TLS test directory."""
        return os.path.relpath(build_tree.guess_mbedtls_root() + '/tests')
||||
|
||||
|
||||
class TestDataAuditor(Auditor):
    """Class for auditing files in `framework/data_files/`"""

    def collect_default_files(self):
        """Collect all files in `framework/data_files/`"""
        test_data_glob = os.path.join(build_tree.guess_mbedtls_root(),
                                      'framework', 'data_files/**')
        # recursive=True makes '**' match nested directories; keep only
        # regular files.
        data_files = [f for f in glob.glob(test_data_glob, recursive=True)
                      if os.path.isfile(f)]
        return data_files

    def parse_file(self, filename: str) -> typing.List[AuditData]:
        """
        Parse a list of AuditData from data file.

        :param filename: name of the file to parse.
        :return list of AuditData parsed from the file.
        """
        with open(filename, 'rb') as f:
            data = f.read()

        results = []
        # Try to parse all PEM blocks.
        is_pem = False
        # Count blocks from 1 so locations read "<file>#1", "<file>#2", ...
        for idx, m in enumerate(re.finditer(X509Parser.PEM_REGEX, data, flags=re.S), 1):
            is_pem = True
            result = self.parse_bytes(data[m.start():m.end()])
            if result is not None:
                result.locations.append("{}#{}".format(filename, idx))
                results.append(result)

        # No PEM block found: the whole file might be DER format.
        if not is_pem:
            result = self.parse_bytes(data)
            if result is not None:
                result.locations.append("{}".format(filename))
                results.append(result)

        return results
||||
|
||||
|
||||
def parse_suite_data(data_f):
    """
    Parse a .data file for test arguments that possibly contain valid
    X.509 data. If you need a more precise parser, please use
    generate_test_code.parse_test_data instead.

    :param data_f: file object (or any iterable of lines) of the data file.
    :return: Generator that yields test function argument lists.
    """
    for raw_line in data_f:
        stripped = raw_line.strip()
        # Comment lines contribute nothing.
        if stripped.startswith('#'):
            continue

        # A parameter line starts with the test function name and contains
        # at least one double-quoted argument.
        if not re.search(r'\A\w+(.*:)?\"', stripped):
            continue

        # Split on ':' that is not escaped as '\:', drop empty fields, and
        # skip the leading test function name.
        fields = [field for field in re.split(r'(?<!\\):', stripped) if field]
        yield fields[1:]
||||
|
||||
|
||||
class SuiteDataAuditor(Auditor):
    """Class for auditing files in `tests/suites/*.data`"""

    def collect_default_files(self):
        """Collect all files in `tests/suites/*.data`"""
        test_dir = self.find_test_dir()
        suites_data_folder = os.path.join(test_dir, 'suites')
        data_files = glob.glob(os.path.join(suites_data_folder, '*.data'))
        return data_files

    def parse_file(self, filename: str):
        """
        Parse a list of AuditData from test suite data file.

        :param filename: name of the file to parse.
        :return list of AuditData parsed from the file.
        """
        audit_data_list = []
        data_f = FileWrapper(filename)
        for test_args in parse_suite_data(data_f):
            for idx, test_arg in enumerate(test_args):
                # Only quoted hex-string arguments can hold DER data.
                match = re.match(r'"(?P<data>[0-9a-fA-F]+)"', test_arg)
                if not match:
                    continue
                # Cheap structural check before attempting a full parse.
                if not X509Parser.check_hex_string(match.group('data')):
                    continue
                audit_data = self.parse_bytes(bytes.fromhex(match.group('data')))
                if audit_data is None:
                    continue
                # Location format: <file>:<line>:#<1-based argument index>
                audit_data.locations.append("{}:{}:#{}".format(filename,
                                                               data_f.line_no,
                                                               idx + 1))
                audit_data_list.append(audit_data)

        return audit_data_list
||||
|
||||
|
||||
def list_all(audit_data: AuditData):
    """Print one tab-separated summary line per location of *audit_data*."""
    # Columns: identifier, not-valid-before, not-valid-after, type, location.
    row_format = "{}\t{:20}\t{:20}\t{:3}\t{}"
    for location in audit_data.locations:
        row = row_format.format(
            audit_data.identifier,
            audit_data.not_valid_before.isoformat(timespec='seconds'),
            audit_data.not_valid_after.isoformat(timespec='seconds'),
            audit_data.data_type.name,
            location)
        print(row)
||||
|
||||
|
||||
def main():
    """
    Perform argument parsing, collect all X.509 objects from the selected
    files, and print those not valid throughout the requested period.
    """
    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument('-a', '--all',
                        action='store_true',
                        help='list the information of all the files')
    parser.add_argument('-v', '--verbose',
                        action='store_true', dest='verbose',
                        help='show logs')
    parser.add_argument('--from', dest='start_date',
                        help=('Start of desired validity period (UTC, YYYY-MM-DD). '
                              'Default: today'),
                        metavar='DATE')
    parser.add_argument('--to', dest='end_date',
                        help=('End of desired validity period (UTC, YYYY-MM-DD). '
                              'Default: --from'),
                        metavar='DATE')
    parser.add_argument('--data-files', action='append', nargs='*',
                        help='data files to audit',
                        metavar='FILE')
    parser.add_argument('--suite-data-files', action='append', nargs='*',
                        help='suite data files to audit',
                        metavar='FILE')

    args = parser.parse_args()

    # start main routine
    # setup logger
    logger = logging.getLogger()
    logging_util.configure_logger(logger)
    logger.setLevel(logging.DEBUG if args.verbose else logging.ERROR)

    td_auditor = TestDataAuditor(logger)
    sd_auditor = SuiteDataAuditor(logger)

    data_files = []
    suite_data_files = []
    # With no explicit file arguments, audit the default locations.
    if args.data_files is None and args.suite_data_files is None:
        data_files = td_auditor.default_files
        suite_data_files = sd_auditor.default_files
    else:
        # action='append' with nargs='*' yields a list of lists; flatten it.
        if args.data_files is not None:
            data_files = [x for l in args.data_files for x in l]
        if args.suite_data_files is not None:
            suite_data_files = [x for l in args.suite_data_files for x in l]

    # validity period start date
    if args.start_date:
        start_date = datetime.datetime.fromisoformat(args.start_date)
    else:
        start_date = datetime.datetime.today()
    # validity period end date
    if args.end_date:
        end_date = datetime.datetime.fromisoformat(args.end_date)
    else:
        end_date = start_date

    # go through all the files
    audit_results = {}
    td_auditor.walk_all(audit_results, data_files)
    sd_auditor.walk_all(audit_results, suite_data_files)

    logger.info("Total: {} objects found!".format(len(audit_results)))

    # we filter out the files whose validity duration covers the provided
    # duration.
    filter_func = lambda d: (start_date < d.not_valid_before) or \
                            (d.not_valid_after < end_date)

    sortby_end = lambda d: d.not_valid_after

    if args.all:
        # filter(None, ...) keeps every (truthy) AuditData object.
        filter_func = None

    # filter and output the results
    for d in sorted(filter(filter_func, audit_results.values()), key=sortby_end):
        list_all(d)

    logger.debug("Done!")
||||
|
||||
# Run at import time so the script fails fast (even when imported) if the
# cryptography package is too old.
check_cryptography_version()

if __name__ == "__main__":
    main()
|
||||
@@ -1,96 +0,0 @@
|
||||
#!/usr/bin/env perl
#
# Based on NIST CTR_DRBG.rsp validation file
# Only uses AES-256-CTR cases that use a Derivation function
# and concats nonce and personalization for initialization.
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use strict;

my $file = shift;

open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";

# Read the next line and extract the value of "[<name> = <value>]".
sub get_suite_val($)
{
    my $name = shift;
    my $val = "";

    my $line = <TEST_DATA>;
    ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);

    return $val;
}

# Skip to the next "name = value" line and return the value for $name.
sub get_val($)
{
    my $name = shift;
    my $val = "";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/);

    return $val;
}

my $cnt = 1;;
while (my $line = <TEST_DATA>)
{
    # Only process the "[AES-256 use df" suites.
    next if ($line !~ /^\[AES-256 use df/);

    my $PredictionResistanceStr = get_suite_val("PredictionResistance");
    my $PredictionResistance = 0;
    $PredictionResistance = 1 if ($PredictionResistanceStr eq 'True');
    my $EntropyInputLen = get_suite_val("EntropyInputLen");
    my $NonceLen = get_suite_val("NonceLen");
    my $PersonalizationStringLen = get_suite_val("PersonalizationStringLen");
    my $AdditionalInputLen = get_suite_val("AdditionalInputLen");

    # Each .rsp suite contains 15 numbered test cases.
    for ($cnt = 0; $cnt < 15; $cnt++)
    {
        my $Count = get_val("COUNT");
        my $EntropyInput = get_val("EntropyInput");
        my $Nonce = get_val("Nonce");
        my $PersonalizationString = get_val("PersonalizationString");
        my $AdditionalInput1 = get_val("AdditionalInput");
        # NOTE(review): "my $x = ... if COND" is officially undefined
        # behavior in Perl. It happens to work here, but the conditional
        # declarations below should not be imitated.
        my $EntropyInputPR1 = get_val("EntropyInputPR") if ($PredictionResistance == 1);
        my $EntropyInputReseed = get_val("EntropyInputReseed") if ($PredictionResistance == 0);
        my $AdditionalInputReseed = get_val("AdditionalInputReseed") if ($PredictionResistance == 0);
        my $AdditionalInput2 = get_val("AdditionalInput");
        my $EntropyInputPR2 = get_val("EntropyInputPR") if ($PredictionResistance == 1);
        my $ReturnedBits = get_val("ReturnedBits");

        # Emit one test-suite entry in the .data file format:
        # a description line, then colon-separated arguments.
        if ($PredictionResistance == 1)
        {
            print("CTR_DRBG NIST Validation (AES-256 use df,$PredictionResistanceStr,$EntropyInputLen,$NonceLen,$PersonalizationStringLen,$AdditionalInputLen) #$Count\n");
            print("ctr_drbg_validate_pr");
            print(":\"$Nonce$PersonalizationString\"");
            print(":\"$EntropyInput$EntropyInputPR1$EntropyInputPR2\"");
            print(":\"$AdditionalInput1\"");
            print(":\"$AdditionalInput2\"");
            print(":\"$ReturnedBits\"");
            print("\n\n");
        }
        else
        {
            print("CTR_DRBG NIST Validation (AES-256 use df,$PredictionResistanceStr,$EntropyInputLen,$NonceLen,$PersonalizationStringLen,$AdditionalInputLen) #$Count\n");
            print("ctr_drbg_validate_nopr");
            print(":\"$Nonce$PersonalizationString\"");
            print(":\"$EntropyInput$EntropyInputReseed\"");
            print(":\"$AdditionalInput1\"");
            print(":\"$AdditionalInputReseed\"");
            print(":\"$AdditionalInput2\"");
            print(":\"$ReturnedBits\"");
            print("\n\n");
        }
    }
}
close(TEST_DATA);
||||
@@ -1,101 +0,0 @@
|
||||
#!/usr/bin/env perl
#
# Based on NIST gcmDecryptxxx.rsp validation files
# Only first 3 of every set used for compile time saving
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use strict;

my $file = shift;

open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";

# Skip to the next "[<name> = <value>]" suite header line and return the value.
sub get_suite_val($)
{
    my $name = shift;
    my $val = "";

    while(my $line = <TEST_DATA>)
    {
        next if ($line !~ /^\[/);
        ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
        last;
    }

    return $val;
}

# Skip to the next "name = value" line and return the value for $name.
sub get_val($)
{
    my $name = shift;
    my $val = "";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/);

    return $val;
}

# Like get_val(), but an expected-failure case has a "FAIL" marker instead of
# a "PT = ..." line; return "FAIL" in that case.
sub get_val_or_fail($)
{
    my $name = shift;
    my $val = "FAIL";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/ && $line !~ /FAIL/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/) if ($line =~ /=/);

    return $val;
}

my $cnt = 1;;
while (my $line = <TEST_DATA>)
{
    my $key_len = get_suite_val("Keylen");
    # EOF / malformed header: Keylen must be numeric.
    next if ($key_len !~ /\d+/);
    my $iv_len = get_suite_val("IVlen");
    my $pt_len = get_suite_val("PTlen");
    my $add_len = get_suite_val("AADlen");
    my $tag_len = get_suite_val("Taglen");

    # Only the first 3 cases of each suite, to limit test-suite size.
    for ($cnt = 0; $cnt < 3; $cnt++)
    {
        my $Count = get_val("Count");
        my $key = get_val("Key");
        my $iv = get_val("IV");
        my $ct = get_val("CT");
        my $add = get_val("AAD");
        my $tag = get_val("Tag");
        my $pt = get_val_or_fail("PT");

        # Emit one .data entry: description line, then colon-separated args.
        print("GCM NIST Validation (AES-$key_len,$iv_len,$pt_len,$add_len,$tag_len) #$Count\n");
        print("gcm_decrypt_and_verify");
        print(":\"$key\"");
        print(":\"$ct\"");
        print(":\"$iv\"");
        print(":\"$add\"");
        print(":$tag_len");
        print(":\"$tag\"");
        print(":\"$pt\"");
        print(":0");
        print("\n\n");
    }
}

print("GCM Selftest\n");
print("gcm_selftest:\n\n");

close(TEST_DATA);
|
||||
@@ -1,84 +0,0 @@
|
||||
#!/usr/bin/env perl
|
||||
#
|
||||
# Based on NIST gcmEncryptIntIVxxx.rsp validation files
|
||||
# Only first 3 of every set used for compile time saving
|
||||
#
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
use strict;
|
||||
|
||||
my $file = shift;
|
||||
|
||||
open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";
|
||||
|
||||
sub get_suite_val($)
{
    # Scan forward to the next "[Name = value]" suite header line and
    # return the value for the requested name ("" if it does not match).
    my ($name) = @_;
    my $val = "";

    while (my $line = <TEST_DATA>) {
        if ($line =~ /^\[/) {
            ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
            last;
        }
    }
    return $val;
}
|
||||
|
||||
sub get_val($)
{
    # Read forward to the next line containing '=' and pull out the
    # value for the requested name.
    my ($name) = @_;
    my $line;

    while ($line = <TEST_DATA>) {
        last if $line =~ /=/;
    }

    my ($val) = ($line =~ /^$name = (\w+)/);
    return $val;
}
|
||||
|
||||
# Walk the NIST gcmEncrypt .rsp file: for each parameter group, emit the
# first 3 vectors as gcm_encrypt_and_tag test cases.
# (Fix: removed stray double semicolon after the $cnt initializer.)
my $cnt = 1;
while (my $line = <TEST_DATA>)
{
    my $key_len = get_suite_val("Keylen");
    next if ($key_len !~ /\d+/);
    my $iv_len = get_suite_val("IVlen");
    my $pt_len = get_suite_val("PTlen");
    my $add_len = get_suite_val("AADlen");
    my $tag_len = get_suite_val("Taglen");

    for ($cnt = 0; $cnt < 3; $cnt++)
    {
        my $Count = get_val("Count");
        my $key = get_val("Key");
        my $pt = get_val("PT");
        my $add = get_val("AAD");
        my $iv = get_val("IV");
        my $ct = get_val("CT");
        my $tag = get_val("Tag");

        print("GCM NIST Validation (AES-$key_len,$iv_len,$pt_len,$add_len,$tag_len) #$Count\n");
        print("gcm_encrypt_and_tag");
        print(":\"$key\"");
        print(":\"$pt\"");
        print(":\"$iv\"");
        print(":\"$add\"");
        print(":\"$ct\"");
        print(":$tag_len");
        print(":\"$tag\"");
        print(":0");
        print("\n\n");
    }
}

print("GCM Selftest\n");
print("gcm_selftest:\n\n");

close(TEST_DATA);
|
||||
@@ -1,74 +0,0 @@
|
||||
#!/usr/bin/env perl
|
||||
#
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
use strict;
|
||||
|
||||
my $file = shift;
|
||||
|
||||
open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";
|
||||
|
||||
sub get_val($$)
{
    # Skip to the "# <str>" header comment, then collect the hex block
    # that follows (terminated by a blank CRLF line), stripping spaces
    # and line endings. NOTE: $name is accepted for call-site symmetry
    # but is not used here.
    my ($str, $name) = @_;
    my $val = "";

    while (my $line = <TEST_DATA>) {
        last if $line =~ /^# $str/;
    }

    while (my $line = <TEST_DATA>) {
        last if $line eq "\r\n";
        $val .= $line;
    }

    $val =~ s/[ \r\n]//g;
    return $val;
}
|
||||
|
||||
# Walk the RSASSA-PSS example file: each "# Example N" section provides an
# RSA key (n, e, p, q) followed by 6 (message, salt, signature) triples,
# from which a sign and a verify test case are emitted.
# (Fix: removed the unused local $state.)
my $val_n = "";
my $val_e = "";
my $val_p = "";
my $val_q = "";
my $mod = 0;
my $cnt = 1;
while (my $line = <TEST_DATA>)
{
    next if ($line !~ /^# Example/);

    # The modulus size in bits appears as "A <bits>" in the header.
    ( $mod ) = ($line =~ /A (\d+)/);
    $val_n = get_val("RSA modulus n", "N");
    $val_e = get_val("RSA public exponent e", "E");
    $val_p = get_val("Prime p", "P");
    $val_q = get_val("Prime q", "Q");

    for(my $i = 1; $i <= 6; $i++)
    {
        my $val_m = get_val("Message to be", "M");
        my $val_salt = get_val("Salt", "Salt");
        my $val_sig = get_val("Signature", "Sig");

        print("RSASSA-PSS Signature Example ${cnt}_${i}\n");
        print("pkcs1_rsassa_pss_sign:$mod:16:\"$val_p\":16:\"$val_q\":16:\"$val_n\":16:\"$val_e\":SIG_RSA_SHA1:MBEDTLS_MD_SHA1");
        print(":\"$val_m\"");
        print(":\"$val_salt\"");
        print(":\"$val_sig\":0");
        print("\n\n");

        print("RSASSA-PSS Signature Example ${cnt}_${i} (verify)\n");
        print("pkcs1_rsassa_pss_verify:$mod:16:\"$val_n\":16:\"$val_e\":SIG_RSA_SHA1:MBEDTLS_MD_SHA1");
        print(":\"$val_m\"");
        print(":\"$val_salt\"");
        print(":\"$val_sig\":0");
        print("\n\n");
    }
    $cnt++;
}
close(TEST_DATA);
|
||||
@@ -1,71 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This script splits the data test files containing the test cases into
|
||||
# individual files (one test case per file) suitable for use with afl
|
||||
# (American Fuzzy Lop). http://lcamtuf.coredump.cx/afl/
|
||||
#
|
||||
# Usage: generate-afl-tests.sh <test data file path>
|
||||
# <test data file path> - should be the path to one of the test suite files
|
||||
# such as 'test_suite_rsa.data'
|
||||
#
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
# Abort on errors
|
||||
# Abort on errors
set -e

if [ -z "$1" ]
then
    echo " [!] No test file specified" >&2
    echo "Usage: $0 <test data file>" >&2
    exit 1
fi

SRC_FILEPATH=$(dirname "$1")/$(basename "$1")
TESTSUITE=$(basename "$1" .data)

THIS_DIR=$(basename "$PWD")

# Fix: POSIX test(1) uses '=', not '=='; the script runs under /bin/sh.
if [ -d ../library ] && [ -d ../include ] && [ -d ../tests ] && [ "$THIS_DIR" = "tests" ];
then :;
else
    echo " [!] Must be run from Mbed TLS tests directory" >&2
    exit 1
fi

DEST_TESTCASE_DIR=$TESTSUITE-afl-tests
DEST_OUTPUT_DIR=$TESTSUITE-afl-out

echo " [+] Creating output directories" >&2

# The unquoted glob is intentional: -e succeeds if exactly one entry exists.
# NOTE(review): with several matches test(1) errors out and we fall through
# to mkdir -p, which is harmless — confirm this is the intended guard.
if [ -e $DEST_OUTPUT_DIR/* ];
then :
    echo " [!] Test output files already exist." >&2
    exit 1
else
    mkdir -p "$DEST_OUTPUT_DIR"
fi

if [ -e $DEST_TESTCASE_DIR/* ];
then :
    echo " [!] Test output files already exist." >&2
else
    mkdir -p "$DEST_TESTCASE_DIR"
fi

echo " [+] Creating test cases" >&2
cd "$DEST_TESTCASE_DIR"

# split -p (BSD): start a new output file at each blank line,
# i.e. one file per test case.
split -p '^\s*$' "../$SRC_FILEPATH"

for f in *;
do
    # Strip out any blank lines (no trim on OS X)
    sed '/^\s*$/d' "$f" >"testcase_$f"
    rm "$f"
done

cd ..

echo " [+] Test cases in $DEST_TESTCASE_DIR" >&2
|
||||
|
||||
@@ -1,87 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Generate server9-bad-saltlen.crt
|
||||
|
||||
Generate a certificate signed with RSA-PSS, with an incorrect salt length.
|
||||
"""
|
||||
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
import subprocess
|
||||
import argparse
|
||||
from asn1crypto import pem, x509, core #type: ignore #pylint: disable=import-error
|
||||
|
||||
OPENSSL_RSA_PSS_CERT_COMMAND = r'''
|
||||
openssl x509 -req -CA {ca_name}.crt -CAkey {ca_name}.key -set_serial 24 {ca_password} \
|
||||
{openssl_extfile} -days 3650 -outform DER -in {csr} \
|
||||
-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{anounce_saltlen} \
|
||||
-sigopt rsa_mgf1_md:sha256
|
||||
'''
|
||||
SIG_OPT = \
|
||||
r'-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{saltlen} -sigopt rsa_mgf1_md:sha256'
|
||||
OPENSSL_RSA_PSS_DGST_COMMAND = r'''openssl dgst -sign {ca_name}.key {ca_password} \
|
||||
-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{actual_saltlen} \
|
||||
-sigopt rsa_mgf1_md:sha256'''
|
||||
|
||||
|
||||
def auto_int(x):
    """Parse *x* as an integer, auto-detecting the base (0x/0o/0b or decimal)."""
    return int(x, base=0)
|
||||
|
||||
|
||||
def build_argparser(parser):
    """Populate *parser* with this tool's command-line options."""
    parser.description = __doc__
    # (flag, value parser, help text) — all options are mandatory.
    required_options = [
        ('--ca-name', str, 'Basename of CA files'),
        ('--ca-password', str, 'CA key file password'),
        ('--csr', str, 'CSR file for generating certificate'),
        ('--openssl-extfile', str, 'X905 v3 extension config file'),
        ('--anounce_saltlen', auto_int, 'Announced salt length'),
        ('--actual_saltlen', auto_int, 'Actual salt length'),
    ]
    for flag, value_type, help_text in required_options:
        parser.add_argument(flag, type=value_type, required=True,
                            help=help_text)
    parser.add_argument('--output', type=str, required=True)
|
||||
|
||||
|
||||
def main():
    """Entry point: parse the command line and generate the certificate."""
    arg_parser = argparse.ArgumentParser()
    build_argparser(arg_parser)
    parsed = arg_parser.parse_args()
    return generate(**vars(parsed))
|
||||
|
||||
def generate(**kwargs):
    """Generate different salt length certificate file.

    Issues a certificate whose signature parameters announce
    ``anounce_saltlen``, then re-signs its tbsCertificate with
    ``actual_saltlen`` and splices the mismatching signature back in.
    """
    # Turn the optional CA password into an openssl -passin option.
    ca_password = kwargs.get('ca_password', '')
    if ca_password:
        kwargs['ca_password'] = r'-passin "pass:{ca_password}"'.format(
            **kwargs)
    else:
        kwargs['ca_password'] = ''
    # Likewise for the optional X.509 v3 extension config file.
    extfile = kwargs.get('openssl_extfile', '')
    if extfile:
        kwargs['openssl_extfile'] = '-extfile {openssl_extfile}'.format(
            **kwargs)
    else:
        kwargs['openssl_extfile'] = ''

    # Issue the certificate announcing `anounce_saltlen` in its sig params.
    cmd = OPENSSL_RSA_PSS_CERT_COMMAND.format(**kwargs)
    der_bytes = subprocess.check_output(cmd, shell=True)
    target_certificate = x509.Certificate.load(der_bytes)

    # Sign the tbsCertificate again, this time with `actual_saltlen`, so the
    # real salt length differs from the announced one.
    cmd = OPENSSL_RSA_PSS_DGST_COMMAND.format(**kwargs)
    #pylint: disable=unexpected-keyword-arg
    der_bytes = subprocess.check_output(cmd,
                                        input=target_certificate['tbs_certificate'].dump(),
                                        shell=True)

    # Splice the mismatching signature into the certificate and write it
    # out PEM-armored.
    with open(kwargs.get('output'), 'wb') as f:
        target_certificate['signature_value'] = core.OctetBitString(der_bytes)
        f.write(pem.armor('CERTIFICATE', target_certificate.dump()))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,89 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
help () {
    cat <<EOF
Usage: $0 [OPTION] [PLATFORM]...
Run all the metatests whose platform matches any of the given PLATFORM.
A PLATFORM can contain shell wildcards.

Expected output: a lot of scary-looking error messages, since each
metatest is expected to report a failure. The final line should be
"Ran N metatests, all good."

If something goes wrong: the final line should be
"Ran N metatests, X unexpected successes". Look for "Unexpected success"
in the logs above.

-l List the available metatests, don't run them.
EOF
}

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

set -e -u

# Locate the metatest binary relative to the invocation directory
# (repo root, tests/, or a subdirectory of tests/).
if [ -d programs ]; then
    METATEST_PROGRAM=programs/test/metatest
elif [ -d ../programs ]; then
    METATEST_PROGRAM=../programs/test/metatest
elif [ -d ../../programs ]; then
    METATEST_PROGRAM=../../programs/test/metatest
else
    echo >&2 "$0: FATAL: programs/test/metatest not found"
    exit 120
fi

LIST_ONLY=
while getopts hl OPTLET; do
    case $OPTLET in
        h) help; exit;;
        l) LIST_ONLY=1;;
        \?) help >&2; exit 120;;
    esac
done
shift $((OPTIND - 1))

# Read "name platform ..." lines on stdin; print the names whose platform
# field matches one of the glob patterns passed as arguments.
list_matches () {
    while read name platform junk; do
        for pattern in "$@"; do
            case $platform in
                $pattern) echo "$name"; break;;
            esac
        done
    done
}

count=0
errors=0
# Run one metatest. A metatest is EXPECTED to fail: exit status 0 is the
# anomaly and counts as an error.
run_metatest () {
    ret=0
    "$METATEST_PROGRAM" "$1" || ret=$?
    if [ $ret -eq 0 ]; then
        echo >&2 "$0: Unexpected success: $1"
        errors=$((errors + 1))
    fi
    count=$((count + 1))
}

# Don't pipe the output of metatest so that if it fails, this script exits
# immediately with a failure status.
full_list=$("$METATEST_PROGRAM" list)
matching_list=$(printf '%s\n' "$full_list" | list_matches "$@")

if [ -n "$LIST_ONLY" ]; then
    printf '%s\n' $matching_list
    exit
fi

for name in $matching_list; do
    run_metatest "$name"
done

if [ $errors -eq 0 ]; then
    echo "Ran $count metatests, all good."
    exit 0
else
    echo "Ran $count metatests, $errors unexpected successes."
    exit 1
fi
|
||||
@@ -1,65 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Run the Mbed TLS demo scripts.
|
||||
"""
|
||||
import argparse
|
||||
import glob
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
def run_demo(demo, quiet=False):
    """Run the specified demo script. Return True if it succeeds."""
    extra_args = {}
    if quiet:
        # Discard the demo's own output when running quietly.
        extra_args['stdout'] = subprocess.DEVNULL
        extra_args['stderr'] = subprocess.DEVNULL
    return subprocess.call([demo], **extra_args) == 0
|
||||
|
||||
def run_demos(demos, quiet=False):
    """Run the specified demos and print summary information about failures.

    Return True if all demos passed and False if a demo fails.
    """
    failures = []
    for demo in demos:
        if not quiet:
            print('#### {} ####'.format(demo))
        passed = run_demo(demo, quiet=quiet)
        if not passed:
            failures.append(demo)
            if not quiet:
                print('{}: FAIL'.format(demo))
        # Quiet mode gets a one-line verdict; verbose mode a blank separator.
        if quiet:
            print('{}: {}'.format(demo, 'PASS' if passed else 'FAIL'))
        else:
            print('')
    pass_count = len(demos) - len(failures)
    print('{}/{} demos passed'.format(pass_count, len(demos)))
    if failures and not quiet:
        print('Failures:', *failures)
    return not failures
|
||||
|
||||
def run_all_demos(quiet=False):
    """Run all the available demos.

    Return True if all demos passed and False if a demo fails.
    """
    all_demos = (glob.glob('programs/*/*_demo.sh') +
                 glob.glob('tf-psa-crypto/programs/*/*_demo.sh'))
    if not all_demos:
        # Keep the message on one line. pylint: disable=line-too-long
        raise Exception('No demos found. run_demos needs to operate from the Mbed TLS toplevel directory.')
    return run_demos(all_demos, quiet=quiet)
|
||||
|
||||
def main():
    """Command-line entry point: run every demo, exit 0 only if all pass."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--quiet', '-q',
                        action='store_true',
                        help="suppress the output of demos")
    args = parser.parse_args()
    all_passed = run_all_demos(quiet=args.quiet)
    sys.exit(0 if all_passed else 1)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,175 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""Test helper for the Mbed TLS configuration file tool
|
||||
|
||||
Run config.py with various parameters and write the results to files.
|
||||
|
||||
This is a harness to help regression testing, not a functional tester.
|
||||
Sample usage:
|
||||
|
||||
test_config_script.py -d old
|
||||
## Modify config.py and/or mbedtls_config.h ##
|
||||
test_config_script.py -d new
|
||||
diff -ru old new
|
||||
"""
|
||||
|
||||
## Copyright The Mbed TLS Contributors
|
||||
## SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
##
|
||||
|
||||
import argparse
|
||||
import glob
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
OUTPUT_FILE_PREFIX = 'config-'
|
||||
|
||||
def output_file_name(directory, stem, extension):
    """Return the path of the output file for *stem* with *extension*."""
    basename = '{}{}.{}'.format(OUTPUT_FILE_PREFIX, stem, extension)
    return os.path.join(directory, basename)
|
||||
|
||||
def cleanup_directory(directory):
    """Remove old output files."""
    # Fix: the extension list was empty, so this function never removed
    # anything. Remove every kind of file that run_one() produces.
    for extension in ['h', 'out', 'err', 'status']:
        pattern = output_file_name(directory, '*', extension)
        filenames = glob.glob(pattern)
        for filename in filenames:
            os.remove(filename)
|
||||
|
||||
def prepare_directory(directory):
    """Create the output directory if it doesn't exist yet.

    If there are old output files, remove them.
    """
    if not os.path.exists(directory):
        os.makedirs(directory)
    else:
        cleanup_directory(directory)
|
||||
|
||||
def guess_presets_from_help(help_text):
    """Figure out what presets the script supports.

    help_text should be the output from running the script with --help.
    """
    # config.py style: an argparse "{get,set,unset,preset1,...}" choice list.
    for hit in re.findall(r'\{([-\w,]+)\}', help_text):
        words = set(hit.split(','))
        if {'get', 'set', 'unset'} <= words:
            return words - {'get', 'set', 'unset'}
    # config.pl style: an indented "name - description" listing.
    hits = re.findall(r'\n +([-\w]+) +- ', help_text)
    if hits:
        return hits
    raise Exception("Unable to figure out supported presets. Pass the '-p' option.")
|
||||
|
||||
def list_presets(options):
    """Return the list of presets to test.

    The list is taken from the command line if present, otherwise it is
    extracted from running the config script with --help.
    """
    if options.presets:
        return re.split(r'[ ,]+', options.presets)
    # config.pl --help exits with 255, hence check=False.
    proc = subprocess.run([options.script, '--help'],
                          check=False,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
    return guess_presets_from_help(proc.stdout.decode('ascii'))
|
||||
|
||||
def run_one(options, args, stem_prefix='', input_file=None):
    """Run the config script with the given arguments.

    Take the original content from input_file if specified, defaulting
    to options.input_file if input_file is None.

    Write the following files, where xxx contains stem_prefix followed by
    a filename-friendly encoding of args:
    * config-xxx.h: modified file.
    * config-xxx.out: standard output.
    * config-xxx.err: standard error.
    * config-xxx.status: exit code.

    Return ("xxx+", "path/to/config-xxx.h") which can be used as
    stem_prefix and input_file to call this function again with new args.
    """
    if input_file is None:
        input_file = options.input_file
    stem = stem_prefix + '-'.join(args)
    data_filename = output_file_name(options.output_directory, stem, 'h')
    stdout_filename = output_file_name(options.output_directory, stem, 'out')
    stderr_filename = output_file_name(options.output_directory, stem, 'err')
    status_filename = output_file_name(options.output_directory, stem, 'status')
    # The script edits its input in place, so work on a copy.
    shutil.copy(input_file, data_filename)
    # Pass only the file basename, not the full path, to avoid getting the
    # directory name in error messages, which would make comparisons
    # between output directories more difficult.
    cmd = [os.path.abspath(options.script),
           '-f', os.path.basename(data_filename)]
    with open(stdout_filename, 'wb') as out:
        with open(stderr_filename, 'wb') as err:
            status = subprocess.call(cmd + args,
                                     cwd=options.output_directory,
                                     stdin=subprocess.DEVNULL,
                                     stdout=out, stderr=err)
    with open(status_filename, 'w') as status_file:
        status_file.write('{}\n'.format(status))
    return stem + "+", data_filename
|
||||
|
||||
### A list of symbols to test with.
### This script currently tests what happens when you change a symbol from
### having a value to not having a value or vice versa. This is not
### necessarily useful behavior, and we may not consider it a bug if
### config.py stops handling that case correctly.
# Each entry is passed to the config script's get/set/unset commands by
# run_all(); the trailing comments note the symbol's expected status in
# the default configuration.
TEST_SYMBOLS = [
    'CUSTOM_SYMBOL', # does not exist
    'PSA_WANT_KEY_TYPE_AES', # set, no value
    'MBEDTLS_MPI_MAX_SIZE', # unset, has a value
    'MBEDTLS_NO_UDBL_DIVISION', # unset, in "System support"
    'MBEDTLS_PLATFORM_ZEROIZE_ALT', # unset, in "Customisation configuration options"
]
|
||||
|
||||
def run_all(options):
    """Run all the command lines to test."""
    for preset in list_presets(options):
        run_one(options, [preset])
    for symbol in TEST_SYMBOLS:
        run_one(options, ['get', symbol])
        # set without a value, then read it back from the modified file.
        stem, filename = run_one(options, ['set', symbol])
        run_one(options, ['get', symbol], stem_prefix=stem, input_file=filename)
        run_one(options, ['--force', 'set', symbol])
        # set with a value, then read it back from the modified file.
        stem, filename = run_one(options, ['set', symbol, 'value'])
        run_one(options, ['get', symbol], stem_prefix=stem, input_file=filename)
        run_one(options, ['--force', 'set', symbol, 'value'])
        run_one(options, ['unset', symbol])
|
||||
|
||||
def main():
    """Command line entry point."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', metavar='DIR',
                        dest='output_directory', required=True,
                        help="""Output directory.""")
    parser.add_argument('-f', metavar='FILE',
                        dest='input_file', default='include/mbedtls/mbedtls_config.h',
                        help="""Config file (default: %(default)s).""")
    parser.add_argument('-p', metavar='PRESET,...',
                        dest='presets',
                        help="""Presets to test (default: guessed from --help).""")
    parser.add_argument('-s', metavar='FILE',
                        dest='script', default='scripts/config.py',
                        help="""Configuration script (default: %(default)s).""")
    opts = parser.parse_args()
    # Make sure the output directory exists and contains no stale results.
    prepare_directory(opts.output_directory)
    run_all(opts)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
Reference in New Issue
Block a user