Merge pull request #10624 from gilles-peskine-arm/audit_validity_dates-move-to-framework

Move some scripts to the framework
This commit is contained in:
Gilles Peskine
2026-03-19 12:19:00 +00:00
committed by GitHub
22 changed files with 18 additions and 1769 deletions

View File

@@ -8,9 +8,6 @@ else
DLOPEN_LDFLAGS ?= DLOPEN_LDFLAGS ?=
endif endif
ifdef RECORD_PSA_STATUS_COVERAGE_LOG
LOCAL_CFLAGS += -Werror -DRECORD_PSA_STATUS_COVERAGE_LOG
endif
DEP=${MBEDLIBS} ${MBEDTLS_TEST_OBJS} DEP=${MBEDLIBS} ${MBEDTLS_TEST_OBJS}
# Only build the dlopen test in shared library builds, and not when building # Only build the dlopen test in shared library builds, and not when building

View File

@@ -1,6 +1,7 @@
# Python package requirements for Mbed TLS testing. # Python package requirements for Mbed TLS testing.
-r driver.requirements.txt -r driver.requirements.txt
-r ../framework/scripts/ci.requirements.txt
# The dependencies below are only used in scripts that we run on the Linux CI. # The dependencies below are only used in scripts that we run on the Linux CI.
@@ -16,13 +17,3 @@ pylint == 2.4.4; platform_system == 'Linux'
# https://github.com/Mbed-TLS/mbedtls-framework/issues/50 # https://github.com/Mbed-TLS/mbedtls-framework/issues/50
# mypy 0.942 is the version in Ubuntu 22.04. # mypy 0.942 is the version in Ubuntu 22.04.
mypy == 0.942; platform_system == 'Linux' mypy == 0.942; platform_system == 'Linux'
# At the time of writing, only needed for tests/scripts/audit-validity-dates.py.
# It needs >=35.0.0 for correct operation, and that requires Python >=3.6.
# >=35.0.0 also requires Rust to build from source, which we are forced to do on
# FreeBSD, since PyPI doesn't carry binary wheels for the BSDs.
cryptography >= 35.0.0; platform_system == 'Linux'
# For building `framework/data_files/server9-bad-saltlen.crt` and check python
# files.
asn1crypto; platform_system == 'Linux'

View File

@@ -1,237 +0,0 @@
#!/usr/bin/env python3
"""
Purpose

This script dumps the comb table of an EC curve. When you add a new EC
curve, you can use this script to generate the code defining `<curve>_T`
in ecp_curves.c.
"""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
import os
import subprocess
import sys
import tempfile
# Porting guide printed by usage(). This is runtime text, kept verbatim.
HOW_TO_ADD_NEW_CURVE = """
If you are trying to add new curve, you can follow these steps:
1. Define curve parameters (<curve>_p, <curve>_gx, etc...) in ecp_curves.c.
2. Add a macro to define <curve>_T to NULL following these parameters.
3. Build mbedcrypto
4. Run this script with an argument of new curve
5. Copy the output of this script into ecp_curves.c and replace the macro added
in Step 2
6. Rebuild and test if everything is ok
Replace the <curve> in the above with the name of the curve you want to add."""
# C compiler used to build the helper programs (override with $CC).
CC = os.getenv('CC', 'cc')
# Directory containing the built mbedcrypto library (override with
# $MBEDTLS_LIBRARY_PATH).
MBEDTLS_LIBRARY_PATH = os.getenv('MBEDTLS_LIBRARY_PATH', "library")
SRC_DUMP_COMB_TABLE = r'''
#include <stdio.h>
#include <stdlib.h>
#include "mbedtls/ecp.h"
#include "mbedtls/error.h"
static void dump_mpi_initialize( const char *name, const mbedtls_mpi *d )
{
uint8_t buf[128] = {0};
size_t olen;
uint8_t *p;
olen = mbedtls_mpi_size( d );
mbedtls_mpi_write_binary_le( d, buf, olen );
printf("static const mbedtls_mpi_uint %s[] = {\n", name);
for (p = buf; p < buf + olen; p += 8) {
printf( " BYTES_TO_T_UINT_8( 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X ),\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7] );
}
printf("};\n");
}
static void dump_T( const mbedtls_ecp_group *grp )
{
char name[128];
printf( "#if MBEDTLS_ECP_FIXED_POINT_OPTIM == 1\n" );
for (size_t i = 0; i < grp->T_size; ++i) {
snprintf( name, sizeof(name), "%s_T_%zu_X", CURVE_NAME, i );
dump_mpi_initialize( name, &grp->T[i].X );
snprintf( name, sizeof(name), "%s_T_%zu_Y", CURVE_NAME, i );
dump_mpi_initialize( name, &grp->T[i].Y );
}
printf( "static const mbedtls_ecp_point %s_T[%zu] = {\n", CURVE_NAME, grp->T_size );
size_t olen;
for (size_t i = 0; i < grp->T_size; ++i) {
int z;
if ( mbedtls_mpi_cmp_int(&grp->T[i].Z, 0) == 0 ) {
z = 0;
} else if ( mbedtls_mpi_cmp_int(&grp->T[i].Z, 1) == 0 ) {
z = 1;
} else {
fprintf( stderr, "Unexpected value of Z (i = %d)\n", (int)i );
exit( 1 );
}
printf( " ECP_POINT_INIT_XY_Z%d(%s_T_%zu_X, %s_T_%zu_Y),\n",
z,
CURVE_NAME, i,
CURVE_NAME, i
);
}
printf("};\n#endif\n\n");
}
int main()
{
int rc;
mbedtls_mpi m;
mbedtls_ecp_point R;
mbedtls_ecp_group grp;
mbedtls_ecp_group_init( &grp );
rc = mbedtls_ecp_group_load( &grp, CURVE_ID );
if (rc != 0) {
char buf[100];
mbedtls_strerror( rc, buf, sizeof(buf) );
fprintf( stderr, "mbedtls_ecp_group_load: %s (-0x%x)\n", buf, -rc );
return 1;
}
grp.T = NULL;
mbedtls_ecp_point_init( &R );
mbedtls_mpi_init( &m);
mbedtls_mpi_lset( &m, 1 );
rc = mbedtls_ecp_mul( &grp, &R, &m, &grp.G, NULL, NULL );
if ( rc != 0 ) {
char buf[100];
mbedtls_strerror( rc, buf, sizeof(buf) );
fprintf( stderr, "mbedtls_ecp_mul: %s (-0x%x)\n", buf, -rc );
return 1;
}
if ( grp.T == NULL ) {
fprintf( stderr, "grp.T is not generated. Please make sure"
"MBEDTLS_ECP_FIXED_POINT_OPTIM is enabled in mbedtls_config.h\n" );
return 1;
}
dump_T( &grp );
return 0;
}
'''
SRC_DUMP_KNOWN_CURVE = r'''
#include <stdio.h>
#include <stdlib.h>
#include "mbedtls/ecp.h"
int main() {
const mbedtls_ecp_curve_info *info = mbedtls_ecp_curve_list();
mbedtls_ecp_group grp;
mbedtls_ecp_group_init( &grp );
while ( info->name != NULL ) {
mbedtls_ecp_group_load( &grp, info->grp_id );
if ( mbedtls_ecp_get_type(&grp) == MBEDTLS_ECP_TYPE_SHORT_WEIERSTRASS ) {
printf( " %s", info->name );
}
info++;
}
printf( "\n" );
return 0;
}
'''
def join_src_path(*args):
    """Build a path relative to the source root (the parent of the
    directory containing this script)."""
    script_dir = os.path.dirname(__file__)
    return os.path.normpath(os.path.join(script_dir, "..", *args))
def run_c_source(src, cflags):
    """
    Compile and run C source code.

    :param src: the C language code to run
    :param cflags: additional cflags passed to the compiler
    :return: True if the program compiled and exited successfully,
             False otherwise.
    """
    # mkstemp (unlike the deprecated, race-prone mktemp) creates the
    # output file atomically; close the fd, the compiler rewrites it.
    bin_fd, binname = tempfile.mkstemp(prefix="mbedtls")
    os.close(bin_fd)
    fd, srcname = tempfile.mkstemp(prefix="mbedtls", suffix=".c")
    with os.fdopen(fd, mode="w") as srcfile:
        srcfile.write(src)
    try:
        args = [CC,
                *cflags,
                '-I' + join_src_path("include"),
                "-o", binname,
                '-L' + MBEDTLS_LIBRARY_PATH,
                srcname,
                '-lmbedcrypto']
        if subprocess.run(args=args, check=False).returncode != 0:
            return False
        p = subprocess.run(args=[binname], check=False, env={
            'LD_LIBRARY_PATH': MBEDTLS_LIBRARY_PATH
        })
        return p.returncode == 0
    finally:
        # Always remove the temporary files: the original version leaked
        # both of them whenever compilation or execution failed.
        os.unlink(srcname)
        os.unlink(binname)
def compute_curve(curve):
    """Compile and run the dumper to print the comb table for *curve*."""
    ok = run_c_source(
        SRC_DUMP_COMB_TABLE,
        [
            '-g',
            '-DCURVE_ID=MBEDTLS_ECP_DP_%s' % curve.upper(),
            '-DCURVE_NAME="%s"' % curve.lower(),
        ])
    if ok:
        return
    print("""\
Unable to compile and run utility.""", file=sys.stderr)
    sys.exit(1)
def usage():
    """Print usage, the list of supported curves, and porting guidance."""
    print("""
Usage: python %s <curve>...

Arguments:
    curve       Specify one or more curve names (e.g secp256r1)

All possible curves: """ % sys.argv[0])
    # List every Short Weierstrass curve known to the built library.
    run_c_source(SRC_DUMP_KNOWN_CURVE, [])
    # Fixed help text: the original read "which c compile to use to
    # compile utility".
    print("""
Environment Variable:
    CC          Specify which C compiler to use to compile the utility.
    MBEDTLS_LIBRARY_PATH
                Specify the path to mbedcrypto library. (e.g. build/library/)

How to add a new curve: %s""" % HOW_TO_ADD_NEW_CURVE)
def run_main():
    """Entry point: warn when no locally-built library is found, then
    dump the comb table of each curve named on the command line."""
    shared_lib_path = os.path.normpath(os.path.join(MBEDTLS_LIBRARY_PATH, "libmbedcrypto.so"))
    static_lib_path = os.path.normpath(os.path.join(MBEDTLS_LIBRARY_PATH, "libmbedcrypto.a"))
    if not os.path.exists(shared_lib_path) and not os.path.exists(static_lib_path):
        # Not fatal: the linker may still find a system-wide mbedcrypto.
        # (Message grammar fixed; it previously read "are not exists".)
        print("Warning: neither '%s' nor '%s' exists. This script will use "
              "the library from your system instead of the library compiled by "
              "this source directory.\n"
              "You can specify library path using environment variable "
              "'MBEDTLS_LIBRARY_PATH'." % (shared_lib_path, static_lib_path),
              file=sys.stderr)
    if len(sys.argv) <= 1:
        usage()
    else:
        for curve in sys.argv[1:]:
            compute_curve(curve)


if __name__ == '__main__':
    run_main()

View File

@@ -1,36 +0,0 @@
#!/usr/bin/env perl
# Parse a massif.out.xxx file and output peak total memory usage
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
use warnings;
use strict;
use utf8;
use open qw(:std utf8);
# Exactly one argument: the massif output file to parse.
die unless @ARGV == 1;
my @snaps;
open my $fh, '<', $ARGV[0] or die;
# Slurp the file split on the 'snapshot=' separator: each element of
# @snaps holds one massif snapshot record.
{ local $/ = 'snapshot='; @snaps = <$fh>; }
close $fh or die;
# Running maximum of the total, with its heap/heap-extra/stack breakdown.
my ($max, $max_heap, $max_he, $max_stack) = (0, 0, 0, 0);
for (@snaps)
{
# The three counters appear in this order in massif output.
my ($heap, $heap_extra, $stack) = m{
mem_heap_B=(\d+)\n
mem_heap_extra_B=(\d+)\n
mem_stacks_B=(\d+)
}xm;
# Skip header chunks that carry no measurement.
next unless defined $heap;
my $total = $heap + $heap_extra + $stack;
if( $total > $max ) {
($max, $max_heap, $max_he, $max_stack) = ($total, $heap, $heap_extra, $stack);
}
}
printf "$max (heap $max_heap+$max_he, stack $max_stack)\n";

View File

@@ -90,7 +90,7 @@ do_config()
kill $SRV_PID kill $SRV_PID
wait $SRV_PID wait $SRV_PID
scripts/massif_max.pl massif.out.* framework/scripts/massif_max.pl massif.out.*
mv massif.out.* massif-$NAME.$$ mv massif.out.* massif-$NAME.$$
} }

View File

@@ -19,10 +19,6 @@ LOCAL_CFLAGS += $(TF_PSA_CRYPTO_LIBRARY_PRIVATE_INCLUDE)
# on non-POSIX platforms. # on non-POSIX platforms.
LOCAL_CFLAGS += -D_POSIX_C_SOURCE=200809L LOCAL_CFLAGS += -D_POSIX_C_SOURCE=200809L
ifdef RECORD_PSA_STATUS_COVERAGE_LOG
LOCAL_CFLAGS += -Werror -DRECORD_PSA_STATUS_COVERAGE_LOG
endif
GENERATED_MBEDTLS_CONFIG_DATA_FILES := $(patsubst tests/%,%,$(shell \ GENERATED_MBEDTLS_CONFIG_DATA_FILES := $(patsubst tests/%,%,$(shell \
$(PYTHON) ../framework/scripts/generate_config_tests.py --list || \ $(PYTHON) ../framework/scripts/generate_config_tests.py --list || \
echo FAILED \ echo FAILED \
@@ -110,12 +106,6 @@ include/test/test_keys.h: ../framework/scripts/generate_test_keys.py
$(PYTHON) ../framework/scripts/generate_test_keys.py --output $@ $(PYTHON) ../framework/scripts/generate_test_keys.py --output $@
TEST_OBJS_DEPS = $(wildcard include/test/*.h include/test/*/*.h) TEST_OBJS_DEPS = $(wildcard include/test/*.h include/test/*/*.h)
ifdef RECORD_PSA_STATUS_COVERAGE_LOG
# Explicitly depend on this header because on a clean copy of the source tree,
# it doesn't exist yet and must be generated as part of the build, and
# therefore the wildcard enumeration above doesn't include it.
TEST_OBJS_DEPS += ../framework/tests/include/test/instrument_record_status.h
endif
TEST_OBJS_DEPS += include/test/test_certs.h include/test/test_keys.h \ TEST_OBJS_DEPS += include/test/test_certs.h include/test/test_keys.h \
../tf-psa-crypto/tests/include/test/test_keys.h ../tf-psa-crypto/tests/include/test/test_keys.h
@@ -311,9 +301,3 @@ libtestdriver1.a:
fi fi
$(MAKE) -C ./libtestdriver1/library CFLAGS="-I../../ $(CFLAGS)" LDFLAGS="$(LDFLAGS)" libmbedcrypto.a $(MAKE) -C ./libtestdriver1/library CFLAGS="-I../../ $(CFLAGS)" LDFLAGS="$(LDFLAGS)" libmbedcrypto.a
cp ./libtestdriver1/library/libmbedcrypto.a ../library/libtestdriver1.a cp ./libtestdriver1/library/libmbedcrypto.a ../library/libtestdriver1.a
ifdef RECORD_PSA_STATUS_COVERAGE_LOG
../framework/tests/include/test/instrument_record_status.h: ../tf-psa-crypto/include/psa/crypto.h Makefile
echo " Gen $@"
sed <../tf-psa-crypto/include/psa/crypto.h >$@ -n 's/^psa_status_t \([A-Za-z0-9_]*\)(.*/#define \1(...) RECORD_STATUS("\1", \1(__VA_ARGS__))/p'
endif

View File

@@ -1,469 +0,0 @@
#!/usr/bin/env python3
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
"""Audit validity date of X509 crt/crl/csr.
This script is used to audit the validity date of crt/crl/csr used for testing.
It prints the information about X.509 objects excluding the objects that
are valid throughout the desired validity period. The data are collected
from framework/data_files/ and tests/suites/*.data files by default.
"""
import os
import re
import typing
import argparse
import datetime
import glob
import logging
import hashlib
from enum import Enum
# The script requires cryptography >= 35.0.0 which is only available
# for Python >= 3.6.
import cryptography
from cryptography import x509
from generate_test_code import FileWrapper
import scripts_path # pylint: disable=unused-import
from mbedtls_framework import build_tree
from mbedtls_framework import logging_util
def check_cryptography_version():
    """Reject cryptography package versions older than 35.0.0.

    The script relies on APIs introduced in cryptography 35.0.0; fail
    early with a clear message rather than with an obscure error later.
    """
    match = re.match(r'^[0-9]+', cryptography.__version__)
    if match is None or int(match.group(0)) < 35:
        # The leading space before the parenthesis matters: the original
        # concatenation printed "...35.0.0(x.y is too old)".
        raise Exception("audit-validity-dates requires cryptography >= 35.0.0"
                        + " ({} is too old)".format(cryptography.__version__))
class DataType(Enum):
    """Kind of X.509 object found in the test data."""
    CRT = 1 # Certificate
    CRL = 2 # Certificate Revocation List
    CSR = 3 # Certificate Signing Request
class DataFormat(Enum):
    """On-disk encoding of an X.509 object."""
    PEM = 1 # Privacy-Enhanced Mail
    DER = 2 # Distinguished Encoding Rules
class AuditData:
    """Store data location, type and validity period of X.509 objects."""
    #pylint: disable=too-few-public-methods
    def __init__(self, data_type: DataType, x509_obj):
        """
        :param data_type: kind of object (certificate, CRL or CSR).
        :param x509_obj: the parsed cryptography x509 object.
        """
        self.data_type = data_type
        # the locations that the x509 object could be found
        self.locations = [] # type: typing.List[str]
        self.fill_validity_duration(x509_obj)
        self._obj = x509_obj
        # SHA-1 of the DER encoding: stable across runs, used to merge
        # duplicates of the same object found in several files.
        encoding = cryptography.hazmat.primitives.serialization.Encoding.DER
        self._identifier = hashlib.sha1(self._obj.public_bytes(encoding)).hexdigest()

    @property
    def identifier(self):
        """
        Identifier of the underlying X.509 object, which is consistent across
        different runs.
        """
        return self._identifier

    def fill_validity_duration(self, x509_obj):
        """Read validity period from an X.509 object.

        Sets self.not_valid_before / self.not_valid_after according to
        the semantics of each object kind.
        """
        # Certificate expires after "not_valid_after"
        # Certificate is invalid before "not_valid_before"
        if self.data_type == DataType.CRT:
            self.not_valid_after = x509_obj.not_valid_after
            self.not_valid_before = x509_obj.not_valid_before
        # CertificateRevocationList expires after "next_update"
        # CertificateRevocationList is invalid before "last_update"
        elif self.data_type == DataType.CRL:
            self.not_valid_after = x509_obj.next_update
            self.not_valid_before = x509_obj.last_update
        # CertificateSigningRequest is always valid.
        elif self.data_type == DataType.CSR:
            self.not_valid_after = datetime.datetime.max
            self.not_valid_before = datetime.datetime.min
        else:
            raise ValueError("Unsupported file_type: {}".format(self.data_type))
class X509Parser:
    """A parser class to parse crt/crl/csr file or data in PEM/DER format."""
    # A full PEM block: "-----BEGIN <type>-----" ... "-----END <type>-----".
    PEM_REGEX = br'-{5}BEGIN (?P<type>.*?)-{5}(?P<data>.*?)-{5}END (?P=type)-{5}'
    # Just the opening tag line of a PEM block.
    PEM_TAG_REGEX = br'-{5}BEGIN (?P<type>.*?)-{5}\n'
    # PEM tag expected for each supported object type.
    PEM_TAGS = {
        DataType.CRT: 'CERTIFICATE',
        DataType.CRL: 'X509 CRL',
        DataType.CSR: 'CERTIFICATE REQUEST'
    }

    def __init__(self,
                 backends:
                 typing.Dict[DataType,
                             typing.Dict[DataFormat,
                                         typing.Callable[[bytes], object]]]) \
        -> None:
        """
        :param backends: for each data type, the loader callable to use
                         for PEM input and for DER input respectively.
        """
        self.backends = backends
        self.__generate_parsers()

    def __generate_parser(self, data_type: DataType):
        """Parser generator for a specific DataType"""
        tag = self.PEM_TAGS[data_type]
        pem_loader = self.backends[data_type][DataFormat.PEM]
        der_loader = self.backends[data_type][DataFormat.DER]
        def wrapper(data: bytes):
            # Returns the parsed object, or None when the data is not an
            # object of this data_type.
            pem_type = X509Parser.pem_data_type(data)
            # It is in PEM format with target tag
            if pem_type == tag:
                return pem_loader(data)
            # It is in PEM format without target tag
            if pem_type:
                return None
            # It might be in DER format
            try:
                result = der_loader(data)
            except ValueError:
                result = None
            return result
        wrapper.__name__ = "{}.parser[{}]".format(type(self).__name__, tag)
        return wrapper

    def __generate_parsers(self):
        """Generate parsers for all support DataType"""
        self.parsers = {}
        for data_type, _ in self.PEM_TAGS.items():
            self.parsers[data_type] = self.__generate_parser(data_type)

    def __getitem__(self, item):
        # parser[DataType.CRT] yields the parser callable for that type.
        return self.parsers[item]

    @staticmethod
    def pem_data_type(data: bytes) -> typing.Optional[str]:
        """Get the tag from the data in PEM format

        :param data: data to be checked in binary mode.
        :return: PEM tag, or None when no tag is detected.
        """
        m = re.search(X509Parser.PEM_TAG_REGEX, data)
        if m is not None:
            return m.group('type').decode('UTF-8')
        else:
            return None

    @staticmethod
    def check_hex_string(hex_str: str) -> bool:
        """Check if the hex string is possibly DER data."""
        hex_len = len(hex_str)
        # At least 6 hex char for 3 bytes: Type + Length + Content
        if hex_len < 6:
            return False
        # Check if Type (1 byte) is SEQUENCE.
        if hex_str[0:2] != '30':
            return False
        # Check LENGTH (1 byte) value
        content_len = int(hex_str[2:4], base=16)
        consumed = 4
        if content_len in (128, 255):
            # Indefinite or Reserved
            return False
        elif content_len > 127:
            # Definite, Long: low 7 bits give the number of length bytes,
            # two hex characters each.
            length_len = (content_len - 128) * 2
            content_len = int(hex_str[consumed:consumed+length_len], base=16)
            consumed += length_len
        # Check LENGTH: the announced content length must exactly match
        # the remaining data.
        if hex_len != content_len * 2 + consumed:
            return False
        return True
class Auditor:
    """
    A base class that uses X509Parser to parse files to a list of AuditData.

    A subclass must implement the following methods:
    - collect_default_files: Return a list of file names that are used by
      default for parsing (auditing). The list will be stored in
      Auditor.default_files.
    - parse_file: Method that parses a single file to a list of AuditData.

    A subclass may override the following methods:
    - parse_bytes: By default, it parses `bytes` that contains only one valid
      X.509 data (DER/PEM format) to an X.509 object.
    - walk_all: By default, it iterates over all the files in the provided
      file name list, calls `parse_file` for each file and stores the results
      by extending the `results` passed to the function.
    """
    def __init__(self, logger):
        self.logger = logger
        self.default_files = self.collect_default_files()
        # Map each supported object type to the cryptography loaders for
        # its PEM and DER encodings.
        self.parser = X509Parser({
            DataType.CRT: {
                DataFormat.PEM: x509.load_pem_x509_certificate,
                DataFormat.DER: x509.load_der_x509_certificate
            },
            DataType.CRL: {
                DataFormat.PEM: x509.load_pem_x509_crl,
                DataFormat.DER: x509.load_der_x509_crl
            },
            DataType.CSR: {
                DataFormat.PEM: x509.load_pem_x509_csr,
                DataFormat.DER: x509.load_der_x509_csr
            },
        })

    def collect_default_files(self) -> typing.List[str]:
        """Collect the default files for parsing."""
        raise NotImplementedError

    def parse_file(self, filename: str) -> typing.List[AuditData]:
        """
        Parse a list of AuditData from file.

        :param filename: name of the file to parse.
        :return list of AuditData parsed from the file.
        """
        raise NotImplementedError

    def parse_bytes(self, data: bytes):
        """Parse AuditData from bytes.

        Try each supported data type in turn and return the first
        successful parse, or None when nothing matched.
        """
        for data_type in list(DataType):
            try:
                result = self.parser[data_type](data)
            except ValueError as val_error:
                result = None
                self.logger.warning(val_error)
            if result is not None:
                audit_data = AuditData(data_type, result)
                return audit_data
        return None

    def walk_all(self,
                 results: typing.Dict[str, AuditData],
                 file_list: typing.Optional[typing.List[str]] = None) \
        -> None:
        """
        Iterate over all the files in the list and get audit data. The
        results will be written to `results` passed to this function.

        :param results: The dictionary used to store the parsed
                        AuditData. The keys of this dictionary should
                        be the identifier of the AuditData.
        :param file_list: files to parse; defaults to self.default_files.
        """
        if file_list is None:
            file_list = self.default_files
        for filename in file_list:
            data_list = self.parse_file(filename)
            for d in data_list:
                # Merge duplicates: one entry per object, accumulating
                # every location where it was found.
                if d.identifier in results:
                    results[d.identifier].locations.extend(d.locations)
                else:
                    results[d.identifier] = d

    @staticmethod
    def find_test_dir():
        """Get the relative path for the Mbed TLS test directory."""
        return os.path.relpath(build_tree.guess_mbedtls_root() + '/tests')
class TestDataAuditor(Auditor):
    """Class for auditing files in `framework/data_files/`"""

    def collect_default_files(self):
        """Collect all files in `framework/data_files/`"""
        test_data_glob = os.path.join(build_tree.guess_mbedtls_root(),
                                      'framework', 'data_files/**')
        data_files = [f for f in glob.glob(test_data_glob, recursive=True)
                      if os.path.isfile(f)]
        return data_files

    def parse_file(self, filename: str) -> typing.List[AuditData]:
        """
        Parse a list of AuditData from data file.

        :param filename: name of the file to parse.
        :return list of AuditData parsed from the file.
        """
        with open(filename, 'rb') as f:
            data = f.read()

        results = []
        # Try to parse all PEM blocks.
        is_pem = False
        # Count from 1 so locations read "file#1", "file#2", ...
        for idx, m in enumerate(re.finditer(X509Parser.PEM_REGEX, data, flags=re.S), 1):
            is_pem = True
            result = self.parse_bytes(data[m.start():m.end()])
            if result is not None:
                result.locations.append("{}#{}".format(filename, idx))
                results.append(result)
        # Might be DER format.
        if not is_pem:
            result = self.parse_bytes(data)
            if result is not None:
                result.locations.append("{}".format(filename))
                results.append(result)
        return results
def parse_suite_data(data_f):
    """
    Scan a .data test-suite file for test arguments that possibly carry
    valid X.509 data. For a more precise parser, use
    generate_test_code.parse_test_data instead.

    :param data_f: iterable of lines from the data file.
    :return: Generator that yields test function argument lists.
    """
    for raw_line in data_f:
        stripped = raw_line.strip()
        # Skip comment lines.
        if stripped.startswith('#'):
            continue
        # Only lines of the form <name>[:...]:"..." carry test vectors.
        if not re.search(r'\A\w+(.*:)?\"', stripped):
            continue
        # Split on ':' separators that are not backslash-escaped, drop
        # empty fields, and strip the leading test-function name.
        fields = [f for f in re.split(r'(?<!\\):', stripped) if f]
        yield fields[1:]
class SuiteDataAuditor(Auditor):
    """Class for auditing files in `tests/suites/*.data`"""

    def collect_default_files(self):
        """Collect all files in `tests/suites/*.data`"""
        test_dir = self.find_test_dir()
        suites_data_folder = os.path.join(test_dir, 'suites')
        data_files = glob.glob(os.path.join(suites_data_folder, '*.data'))
        return data_files

    def parse_file(self, filename: str):
        """
        Parse a list of AuditData from test suite data file.

        :param filename: name of the file to parse.
        :return list of AuditData parsed from the file.
        """
        audit_data_list = []
        data_f = FileWrapper(filename)
        for test_args in parse_suite_data(data_f):
            for idx, test_arg in enumerate(test_args):
                # Only quoted, hex-only arguments can hold DER data.
                match = re.match(r'"(?P<data>[0-9a-fA-F]+)"', test_arg)
                if not match:
                    continue
                # Cheap DER plausibility check before attempting a full parse.
                if not X509Parser.check_hex_string(match.group('data')):
                    continue
                audit_data = self.parse_bytes(bytes.fromhex(match.group('data')))
                if audit_data is None:
                    continue
                # Location format: file:line:#argument-index (1-based).
                audit_data.locations.append("{}:{}:#{}".format(filename,
                                                               data_f.line_no,
                                                               idx + 1))
                audit_data_list.append(audit_data)
        return audit_data_list
def list_all(audit_data: AuditData):
    """Print one tab-separated line per location where *audit_data* was
    found: identifier, validity start, validity end, type, location."""
    row_format = "{}\t{:20}\t{:20}\t{:3}\t{}"
    for location in audit_data.locations:
        print(row_format.format(
            audit_data.identifier,
            audit_data.not_valid_before.isoformat(timespec='seconds'),
            audit_data.not_valid_after.isoformat(timespec='seconds'),
            audit_data.data_type.name,
            location))
def main():
    """
    Perform argument parsing and run the audit.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-a', '--all',
                        action='store_true',
                        help='list the information of all the files')
    parser.add_argument('-v', '--verbose',
                        action='store_true', dest='verbose',
                        help='show logs')
    parser.add_argument('--from', dest='start_date',
                        help=('Start of desired validity period (UTC, YYYY-MM-DD). '
                              'Default: today'),
                        metavar='DATE')
    parser.add_argument('--to', dest='end_date',
                        help=('End of desired validity period (UTC, YYYY-MM-DD). '
                              'Default: --from'),
                        metavar='DATE')
    parser.add_argument('--data-files', action='append', nargs='*',
                        help='data files to audit',
                        metavar='FILE')
    parser.add_argument('--suite-data-files', action='append', nargs='*',
                        help='suite data files to audit',
                        metavar='FILE')
    args = parser.parse_args()

    # start main routine
    # setup logger
    logger = logging.getLogger()
    logging_util.configure_logger(logger)
    logger.setLevel(logging.DEBUG if args.verbose else logging.ERROR)

    td_auditor = TestDataAuditor(logger)
    sd_auditor = SuiteDataAuditor(logger)

    data_files = []
    suite_data_files = []
    # With no explicit file arguments audit everything; otherwise audit
    # only what was requested. action='append' + nargs='*' produces a
    # list of lists, hence the flattening comprehensions.
    if args.data_files is None and args.suite_data_files is None:
        data_files = td_auditor.default_files
        suite_data_files = sd_auditor.default_files
    else:
        if args.data_files is not None:
            data_files = [x for l in args.data_files for x in l]
        if args.suite_data_files is not None:
            suite_data_files = [x for l in args.suite_data_files for x in l]

    # validity period start date
    if args.start_date:
        start_date = datetime.datetime.fromisoformat(args.start_date)
    else:
        start_date = datetime.datetime.today()
    # validity period end date
    if args.end_date:
        end_date = datetime.datetime.fromisoformat(args.end_date)
    else:
        end_date = start_date

    # go through all the files
    audit_results = {}  # type: typing.Dict[str, AuditData]
    td_auditor.walk_all(audit_results, data_files)
    sd_auditor.walk_all(audit_results, suite_data_files)

    logger.info("Total: {} objects found!".format(len(audit_results)))

    # we filter out the files whose validity duration covers the provided
    # duration, i.e. report only objects NOT valid over the whole period.
    filter_func = lambda d: (start_date < d.not_valid_before) or \
                            (d.not_valid_after < end_date)

    # Sort the report by expiry date, soonest first.
    sortby_end = lambda d: d.not_valid_after

    if args.all:
        # filter(None, ...) keeps every (truthy) AuditData object.
        filter_func = None

    # filter and output the results
    for d in sorted(filter(filter_func, audit_results.values()), key=sortby_end):
        list_all(d)

    logger.debug("Done!")

# Verify the cryptography dependency as soon as the module is loaded, so
# the failure is clear even on import.
check_cryptography_version()
if __name__ == "__main__":
    main()

View File

@@ -462,7 +462,7 @@ component_test_everest () {
make test make test
msg "test: metatests (clang, ASan)" msg "test: metatests (clang, ASan)"
tests/scripts/run-metatests.sh any asan poison framework/scripts/run-metatests.sh any asan poison
msg "test: Everest ECDH context - ECDH-related part of ssl-opt.sh (ASan build)" # ~ 5s msg "test: Everest ECDH context - ECDH-related part of ssl-opt.sh (ASan build)" # ~ 5s
tests/ssl-opt.sh -f ECDH tests/ssl-opt.sh -f ECDH
@@ -493,15 +493,6 @@ component_test_everest_curve25519_only () {
ctest ctest
} }
component_test_psa_collect_statuses () {
msg "build+test: psa_collect_statuses" # ~30s
scripts/config.py full
tests/scripts/psa_collect_statuses.py
# Check that psa_crypto_init() succeeded at least once
grep -q '^0:psa_crypto_init:' tests/statuses.log
rm -f tests/statuses.log
}
# Check that the specified libraries exist and are empty. # Check that the specified libraries exist and are empty.
are_empty_libraries () { are_empty_libraries () {
nm "$@" >/dev/null 2>/dev/null nm "$@" >/dev/null 2>/dev/null

View File

@@ -22,7 +22,7 @@ component_test_default_out_of_box () {
programs/test/selftest programs/test/selftest
msg "program demos: make, default config (out-of-box)" # ~10s msg "program demos: make, default config (out-of-box)" # ~10s
tests/scripts/run_demos.py framework/scripts/run_demos.py
} }
component_test_default_cmake_gcc_asan () { component_test_default_cmake_gcc_asan () {
@@ -34,13 +34,13 @@ component_test_default_cmake_gcc_asan () {
make test make test
msg "program demos (ASan build)" # ~10s msg "program demos (ASan build)" # ~10s
tests/scripts/run_demos.py framework/scripts/run_demos.py
msg "test: selftest (ASan build)" # ~ 10s msg "test: selftest (ASan build)" # ~ 10s
programs/test/selftest programs/test/selftest
msg "test: metatests (GCC, ASan build)" msg "test: metatests (GCC, ASan build)"
tests/scripts/run-metatests.sh any asan poison framework/scripts/run-metatests.sh any asan poison
msg "test: ssl-opt.sh (ASan build)" # ~ 1 min msg "test: ssl-opt.sh (ASan build)" # ~ 1 min
tests/ssl-opt.sh tests/ssl-opt.sh
@@ -143,10 +143,10 @@ component_test_full_cmake_clang () {
programs/test/cpp_dummy_build programs/test/cpp_dummy_build
msg "test: metatests (clang)" msg "test: metatests (clang)"
tests/scripts/run-metatests.sh any pthread framework/scripts/run-metatests.sh any pthread
msg "program demos (full config, clang)" # ~10s msg "program demos (full config, clang)" # ~10s
tests/scripts/run_demos.py framework/scripts/run_demos.py
msg "test: psa_constant_names (full config, clang)" # ~ 1s msg "test: psa_constant_names (full config, clang)" # ~ 1s
$FRAMEWORK/scripts/test_psa_constant_names.py $FRAMEWORK/scripts/test_psa_constant_names.py
@@ -214,7 +214,7 @@ component_test_full_deprecated_warning () {
$MAKE_COMMAND test $MAKE_COMMAND test
msg "program demos: full config + MBEDTLS_TEST_DEPRECATED" # ~10s msg "program demos: full config + MBEDTLS_TEST_DEPRECATED" # ~10s
tests/scripts/run_demos.py framework/scripts/run_demos.py
} }
component_build_baremetal () { component_build_baremetal () {

View File

@@ -330,7 +330,7 @@ component_test_arm_linux_gnueabi_gcc_arm5vte () {
programs/test/selftest programs/test/selftest
msg "program demos: make, default config (out-of-box)" # ~0s msg "program demos: make, default config (out-of-box)" # ~0s
tests/scripts/run_demos.py framework/scripts/run_demos.py
} }
support_test_arm_linux_gnueabi_gcc_arm5vte () { support_test_arm_linux_gnueabi_gcc_arm5vte () {
@@ -350,7 +350,7 @@ component_test_arm_linux_gnueabi_gcc_thumb_1_opt_0 () {
programs/test/selftest programs/test/selftest
msg "program demos: make, default config (out-of-box)" # ~0s msg "program demos: make, default config (out-of-box)" # ~0s
tests/scripts/run_demos.py framework/scripts/run_demos.py
} }
support_test_arm_linux_gnueabi_gcc_thumb_1_opt_0 () { support_test_arm_linux_gnueabi_gcc_thumb_1_opt_0 () {
@@ -368,7 +368,7 @@ component_test_arm_linux_gnueabi_gcc_thumb_1_opt_s () {
programs/test/selftest programs/test/selftest
msg "program demos: make, default config (out-of-box)" # ~0s msg "program demos: make, default config (out-of-box)" # ~0s
tests/scripts/run_demos.py framework/scripts/run_demos.py
} }
support_test_arm_linux_gnueabi_gcc_thumb_1_opt_s () { support_test_arm_linux_gnueabi_gcc_thumb_1_opt_s () {
@@ -386,7 +386,7 @@ component_test_arm_linux_gnueabihf_gcc_armv7 () {
programs/test/selftest programs/test/selftest
msg "program demos: make, default config (out-of-box)" # ~0s msg "program demos: make, default config (out-of-box)" # ~0s
tests/scripts/run_demos.py framework/scripts/run_demos.py
} }
support_test_arm_linux_gnueabihf_gcc_armv7 () { support_test_arm_linux_gnueabihf_gcc_armv7 () {
@@ -404,7 +404,7 @@ component_test_arm_linux_gnueabihf_gcc_thumb_2 () {
programs/test/selftest programs/test/selftest
msg "program demos: make, default config (out-of-box)" # ~0s msg "program demos: make, default config (out-of-box)" # ~0s
tests/scripts/run_demos.py framework/scripts/run_demos.py
} }
support_test_arm_linux_gnueabihf_gcc_thumb_2 () { support_test_arm_linux_gnueabihf_gcc_thumb_2 () {
@@ -422,7 +422,7 @@ component_test_aarch64_linux_gnu_gcc () {
programs/test/selftest programs/test/selftest
msg "program demos: make, default config (out-of-box)" # ~0s msg "program demos: make, default config (out-of-box)" # ~0s
tests/scripts/run_demos.py framework/scripts/run_demos.py
} }
support_test_aarch64_linux_gnu_gcc () { support_test_aarch64_linux_gnu_gcc () {

View File

@@ -132,10 +132,10 @@ component_test_memsan () {
make test make test
msg "test: metatests (MSan)" msg "test: metatests (MSan)"
tests/scripts/run-metatests.sh any msan framework/scripts/run-metatests.sh any msan
msg "program demos (MSan)" # ~20s msg "program demos (MSan)" # ~20s
tests/scripts/run_demos.py framework/scripts/run_demos.py
msg "test: ssl-opt.sh (MSan)" # ~ 1 min msg "test: ssl-opt.sh (MSan)" # ~ 1 min
tests/ssl-opt.sh tests/ssl-opt.sh

View File

@@ -1,96 +0,0 @@
#!/usr/bin/env perl
#
# Based on NIST CTR_DRBG.rsp validation file
# Only uses AES-256-CTR cases that use a Derivation function
# and concats nonce and personalization for initialization.
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
use strict;

my $file = shift;

open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";

# Read a "[Name = value]" suite-level header from the next line.
sub get_suite_val($)
{
    my $name = shift;
    my $val = "";
    my $line = <TEST_DATA>;
    ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
    return $val;
}

# Skip ahead to the next "Name = value" line and return the value.
sub get_val($)
{
    my $name = shift;
    my $val = "";
    my $line;
    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/);
        last;
    }
    ($val) = ($line =~ /^$name = (\w+)/);
    return $val;
}

my $cnt = 1;

while (my $line = <TEST_DATA>)
{
    # Only process AES-256 suites that use a derivation function.
    next if ($line !~ /^\[AES-256 use df/);

    my $PredictionResistanceStr = get_suite_val("PredictionResistance");
    my $PredictionResistance = 0;
    $PredictionResistance = 1 if ($PredictionResistanceStr eq 'True');
    my $EntropyInputLen = get_suite_val("EntropyInputLen");
    my $NonceLen = get_suite_val("NonceLen");
    my $PersonalizationStringLen = get_suite_val("PersonalizationStringLen");
    my $AdditionalInputLen = get_suite_val("AdditionalInputLen");

    # Each suite contains 15 COUNT cases.
    for ($cnt = 0; $cnt < 15; $cnt++)
    {
        my $Count = get_val("COUNT");
        my $EntropyInput = get_val("EntropyInput");
        my $Nonce = get_val("Nonce");
        my $PersonalizationString = get_val("PersonalizationString");
        my $AdditionalInput1 = get_val("AdditionalInput");
        # BUGFIX: "my $x = EXPR if COND;" has unspecified behavior in
        # Perl — when COND is false the variable may silently keep its
        # value from a previous loop iteration. Declare first, then
        # assign conditionally.
        my ($EntropyInputPR1, $EntropyInputReseed, $AdditionalInputReseed);
        $EntropyInputPR1 = get_val("EntropyInputPR") if ($PredictionResistance == 1);
        $EntropyInputReseed = get_val("EntropyInputReseed") if ($PredictionResistance == 0);
        $AdditionalInputReseed = get_val("AdditionalInputReseed") if ($PredictionResistance == 0);
        my $AdditionalInput2 = get_val("AdditionalInput");
        my $EntropyInputPR2;
        $EntropyInputPR2 = get_val("EntropyInputPR") if ($PredictionResistance == 1);
        my $ReturnedBits = get_val("ReturnedBits");

        if ($PredictionResistance == 1)
        {
            # PR case: entropy for both generate calls is concatenated.
            print("CTR_DRBG NIST Validation (AES-256 use df,$PredictionResistanceStr,$EntropyInputLen,$NonceLen,$PersonalizationStringLen,$AdditionalInputLen) #$Count\n");
            print("ctr_drbg_validate_pr");
            print(":\"$Nonce$PersonalizationString\"");
            print(":\"$EntropyInput$EntropyInputPR1$EntropyInputPR2\"");
            print(":\"$AdditionalInput1\"");
            print(":\"$AdditionalInput2\"");
            print(":\"$ReturnedBits\"");
            print("\n\n");
        }
        else
        {
            # No-PR case: reseed entropy is appended to the initial entropy.
            print("CTR_DRBG NIST Validation (AES-256 use df,$PredictionResistanceStr,$EntropyInputLen,$NonceLen,$PersonalizationStringLen,$AdditionalInputLen) #$Count\n");
            print("ctr_drbg_validate_nopr");
            print(":\"$Nonce$PersonalizationString\"");
            print(":\"$EntropyInput$EntropyInputReseed\"");
            print(":\"$AdditionalInput1\"");
            print(":\"$AdditionalInputReseed\"");
            print(":\"$AdditionalInput2\"");
            print(":\"$ReturnedBits\"");
            print("\n\n");
        }
    }
}
close(TEST_DATA);

View File

@@ -1,101 +0,0 @@
#!/usr/bin/env perl
#
# Based on NIST gcmDecryptxxx.rsp validation files
# Only first 3 of every set used for compile time saving
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use strict;

my $file = shift;

# Three-argument open: with the two-argument form, a file name starting
# with a character such as '<', '>' or '|' would be misinterpreted as an
# I/O mode or a pipe.
open(TEST_DATA, '<', $file) or die "Opening test cases '$file': $!";

# Scan forward to the next "[Name = value]" suite header line and return
# the value, or "" if the next header line is for a different name.
sub get_suite_val($)
{
    my $name = shift;
    my $val = "";

    while(my $line = <TEST_DATA>)
    {
        next if ($line !~ /^\[/);
        ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
        last;
    }

    return $val;
}

# Scan forward to the next "Name = value" line and return the value, or
# "" if the next such line is for a different name.
sub get_val($)
{
    my $name = shift;
    my $val = "";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/);

    return $val;
}

# Like get_val(), but return "FAIL" when the test vector marks this case
# as an expected authentication failure instead of providing a value.
sub get_val_or_fail($)
{
    my $name = shift;
    my $val = "FAIL";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/ && $line !~ /FAIL/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/) if ($line =~ /=/);

    return $val;
}

my $cnt = 1;
while (my $line = <TEST_DATA>)
{
    my $key_len = get_suite_val("Keylen");
    next if ($key_len !~ /\d+/);
    my $iv_len = get_suite_val("IVlen");
    my $pt_len = get_suite_val("PTlen");
    my $add_len = get_suite_val("AADlen");
    my $tag_len = get_suite_val("Taglen");

    # Only the first 3 vectors of each parameter set, to limit the size
    # of the generated test suite.
    for ($cnt = 0; $cnt < 3; $cnt++)
    {
        my $Count = get_val("Count");
        my $key = get_val("Key");
        my $iv = get_val("IV");
        my $ct = get_val("CT");
        my $add = get_val("AAD");
        my $tag = get_val("Tag");
        my $pt = get_val_or_fail("PT");

        print("GCM NIST Validation (AES-$key_len,$iv_len,$pt_len,$add_len,$tag_len) #$Count\n");
        print("gcm_decrypt_and_verify");
        print(":\"$key\"");
        print(":\"$ct\"");
        print(":\"$iv\"");
        print(":\"$add\"");
        print(":$tag_len");
        print(":\"$tag\"");
        print(":\"$pt\"");
        print(":0");
        print("\n\n");
    }
}

print("GCM Selftest\n");
print("gcm_selftest:\n\n");

close(TEST_DATA);

View File

@@ -1,84 +0,0 @@
#!/usr/bin/env perl
#
# Based on NIST gcmEncryptIntIVxxx.rsp validation files
# Only first 3 of every set used for compile time saving
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use strict;

my $file = shift;

# Three-argument open: with the two-argument form, a file name starting
# with a character such as '<', '>' or '|' would be misinterpreted as an
# I/O mode or a pipe.
open(TEST_DATA, '<', $file) or die "Opening test cases '$file': $!";

# Scan forward to the next "[Name = value]" suite header line and return
# the value, or "" if the next header line is for a different name.
sub get_suite_val($)
{
    my $name = shift;
    my $val = "";

    while(my $line = <TEST_DATA>)
    {
        next if ($line !~ /^\[/);
        ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
        last;
    }

    return $val;
}

# Scan forward to the next "Name = value" line and return the value, or
# "" if the next such line is for a different name.
sub get_val($)
{
    my $name = shift;
    my $val = "";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/);

    return $val;
}

my $cnt = 1;
while (my $line = <TEST_DATA>)
{
    my $key_len = get_suite_val("Keylen");
    next if ($key_len !~ /\d+/);
    my $iv_len = get_suite_val("IVlen");
    my $pt_len = get_suite_val("PTlen");
    my $add_len = get_suite_val("AADlen");
    my $tag_len = get_suite_val("Taglen");

    # Only the first 3 vectors of each parameter set, to limit the size
    # of the generated test suite.
    for ($cnt = 0; $cnt < 3; $cnt++)
    {
        my $Count = get_val("Count");
        my $key = get_val("Key");
        my $pt = get_val("PT");
        my $add = get_val("AAD");
        my $iv = get_val("IV");
        my $ct = get_val("CT");
        my $tag = get_val("Tag");

        print("GCM NIST Validation (AES-$key_len,$iv_len,$pt_len,$add_len,$tag_len) #$Count\n");
        print("gcm_encrypt_and_tag");
        print(":\"$key\"");
        print(":\"$pt\"");
        print(":\"$iv\"");
        print(":\"$add\"");
        print(":\"$ct\"");
        print(":$tag_len");
        print(":\"$tag\"");
        print(":0");
        print("\n\n");
    }
}

print("GCM Selftest\n");
print("gcm_selftest:\n\n");

close(TEST_DATA);

View File

@@ -1,74 +0,0 @@
#!/usr/bin/env perl
#
# Generate RSASSA-PSS test cases from a test vector file in the format
# of the pkcs-1v2-1-vec examples (sections introduced by "# Example"
# comment lines, values given as hex blocks terminated by a blank line).
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use strict;

my $file = shift;

# Three-argument open: with the two-argument form, a file name starting
# with a character such as '<', '>' or '|' would be misinterpreted as an
# I/O mode or a pipe.
open(TEST_DATA, '<', $file) or die "Opening test cases '$file': $!";

# Scan forward to the next "# $str" comment line, then accumulate the
# following hex block (up to a blank CRLF line), stripping spaces and
# line endings. The second argument is unused and kept for readability
# at the call sites.
sub get_val($$)
{
    my $str = shift;
    my $name = shift;
    my $val = "";

    while(my $line = <TEST_DATA>)
    {
        next if($line !~ /^# $str/);
        last;
    }

    while(my $line = <TEST_DATA>)
    {
        last if($line eq "\r\n");
        $val .= $line;
    }

    $val =~ s/[ \r\n]//g;

    return $val;
}

my $val_n = "";
my $val_e = "";
my $val_p = "";
my $val_q = "";
my $mod = 0;
my $cnt = 1;
while (my $line = <TEST_DATA>)
{
    next if ($line !~ /^# Example/);

    # "# Example N: A XXXX-bit RSA key" gives the modulus size in bits.
    ( $mod ) = ($line =~ /A (\d+)/);
    $val_n = get_val("RSA modulus n", "N");
    $val_e = get_val("RSA public exponent e", "E");
    $val_p = get_val("Prime p", "P");
    $val_q = get_val("Prime q", "Q");

    # Each example provides 6 message/salt/signature triplets.
    for(my $i = 1; $i <= 6; $i++)
    {
        my $val_m = get_val("Message to be", "M");
        my $val_salt = get_val("Salt", "Salt");
        my $val_sig = get_val("Signature", "Sig");

        print("RSASSA-PSS Signature Example ${cnt}_${i}\n");
        print("pkcs1_rsassa_pss_sign:$mod:16:\"$val_p\":16:\"$val_q\":16:\"$val_n\":16:\"$val_e\":SIG_RSA_SHA1:MBEDTLS_MD_SHA1");
        print(":\"$val_m\"");
        print(":\"$val_salt\"");
        print(":\"$val_sig\":0");
        print("\n\n");

        print("RSASSA-PSS Signature Example ${cnt}_${i} (verify)\n");
        print("pkcs1_rsassa_pss_verify:$mod:16:\"$val_n\":16:\"$val_e\":SIG_RSA_SHA1:MBEDTLS_MD_SHA1");
        print(":\"$val_m\"");
        print(":\"$val_salt\"");
        print(":\"$val_sig\":0");
        print("\n\n");
    }
    $cnt++;
}

close(TEST_DATA);

View File

@@ -1,71 +0,0 @@
#!/bin/sh
# This script splits the data test files containing the test cases into
# individual files (one test case per file) suitable for use with afl
# (American Fuzzy Lop). http://lcamtuf.coredump.cx/afl/
#
# Usage: generate-afl-tests.sh <test data file path>
#  <test data file path> - should be the path to one of the test suite files
#                          such as 'test_suite_rsa.data'
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

# Abort on errors
set -e

if [ -z "$1" ]
then
    echo " [!] No test file specified" >&2
    echo "Usage: $0 <test data file>" >&2
    exit 1
fi

SRC_FILEPATH=$(dirname "$1")/$(basename "$1")
TESTSUITE=$(basename "$1" .data)

THIS_DIR=$(basename "$PWD")

# Use '=' rather than '==': '==' is a bashism that other /bin/sh
# implementations (e.g. dash) reject.
if [ -d ../library -a -d ../include -a -d ../tests -a "$THIS_DIR" = "tests" ];
then :;
else
    echo " [!] Must be run from Mbed TLS tests directory" >&2
    exit 1
fi

DEST_TESTCASE_DIR=$TESTSUITE-afl-tests
DEST_OUTPUT_DIR=$TESTSUITE-afl-out

echo " [+] Creating output directories" >&2

# Check for leftovers without expanding a glob inside [ ], which fails
# with "too many arguments" as soon as more than one file is present.
if [ -d "$DEST_OUTPUT_DIR" ] && [ -n "$(ls -A "$DEST_OUTPUT_DIR")" ]
then
    echo " [!] Test output files already exist." >&2
    exit 1
else
    mkdir -p "$DEST_OUTPUT_DIR"
fi

if [ -d "$DEST_TESTCASE_DIR" ] && [ -n "$(ls -A "$DEST_TESTCASE_DIR")" ]
then
    echo " [!] Test case files already exist." >&2
else
    mkdir -p "$DEST_TESTCASE_DIR"
fi

echo " [+] Creating test cases" >&2
cd "$DEST_TESTCASE_DIR"

# Note: 'split -p' is a BSD/macOS extension; GNU coreutils split does
# not support it.
split -p '^\s*$' "../$SRC_FILEPATH"

for f in *;
do
    # Strip out any blank lines (no trim on OS X)
    sed '/^\s*$/d' "$f" >"testcase_$f"
    rm "$f"
done

cd ..

echo " [+] Test cases in $DEST_TESTCASE_DIR" >&2

View File

@@ -1,87 +0,0 @@
#!/usr/bin/env python3
"""Generate server9-bad-saltlen.crt
Generate a certificate signed with RSA-PSS, with an incorrect salt length.
"""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
import subprocess
import argparse
from asn1crypto import pem, x509, core #type: ignore #pylint: disable=import-error
# Command template that creates the certificate, announcing
# {anounce_saltlen} in the RSA-PSS signature parameters.
OPENSSL_RSA_PSS_CERT_COMMAND = r'''
openssl x509 -req -CA {ca_name}.crt -CAkey {ca_name}.key -set_serial 24 {ca_password} \
{openssl_extfile} -days 3650 -outform DER -in {csr} \
-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{anounce_saltlen} \
-sigopt rsa_mgf1_md:sha256
'''
# Common RSA-PSS signing options. NOTE(review): not referenced anywhere
# in this script's visible code; presumably kept for manual use.
SIG_OPT = \
    r'-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{saltlen} -sigopt rsa_mgf1_md:sha256'
# Command template that re-signs the TBS data (fed on stdin) with
# {actual_saltlen}, producing the mismatching signature.
OPENSSL_RSA_PSS_DGST_COMMAND = r'''openssl dgst -sign {ca_name}.key {ca_password} \
-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{actual_saltlen} \
-sigopt rsa_mgf1_md:sha256'''
def auto_int(x):
    """Parse an integer, accepting 0x/0o/0b prefixes as well as decimal."""
    return int(x, base=0)
def build_argparser(parser):
    """Populate *parser* with this tool's command-line arguments."""
    parser.description = __doc__
    parser.add_argument('--ca-name', type=str, required=True,
                        help='Basename of CA files')
    parser.add_argument('--ca-password', type=str,
                        required=True, help='CA key file password')
    parser.add_argument('--csr', type=str, required=True,
                        help='CSR file for generating certificate')
    # Help text previously misspelled "X509" as "X905".
    parser.add_argument('--openssl-extfile', type=str,
                        required=True, help='X509 v3 extension config file')
    # auto_int lets salt lengths be given in hex (0x...) or decimal.
    parser.add_argument('--anounce_saltlen', type=auto_int,
                        required=True, help='Announced salt length')
    parser.add_argument('--actual_saltlen', type=auto_int,
                        required=True, help='Actual salt length')
    parser.add_argument('--output', type=str, required=True)
def main():
    """Command-line entry point: parse arguments and generate the file."""
    arg_parser = argparse.ArgumentParser()
    build_argparser(arg_parser)
    return generate(**vars(arg_parser.parse_args()))
def generate(**kwargs):
    """Generate a certificate with an incorrect RSA-PSS salt length.

    The certificate is first created announcing ``anounce_saltlen`` in
    its signature parameters, then its TBS part is re-signed using
    ``actual_saltlen``, and the mismatching signature is spliced back in
    before writing the PEM output.
    """
    ca_password = kwargs.get('ca_password', '')
    if ca_password:
        # Turn the raw password into an openssl -passin argument.
        kwargs['ca_password'] = r'-passin "pass:{ca_password}"'.format(
            **kwargs)
    else:
        kwargs['ca_password'] = ''
    extfile = kwargs.get('openssl_extfile', '')
    if extfile:
        kwargs['openssl_extfile'] = '-extfile {openssl_extfile}'.format(
            **kwargs)
    else:
        kwargs['openssl_extfile'] = ''
    # Create the certificate, announcing anounce_saltlen (DER output).
    cmd = OPENSSL_RSA_PSS_CERT_COMMAND.format(**kwargs)
    der_bytes = subprocess.check_output(cmd, shell=True)
    target_certificate = x509.Certificate.load(der_bytes)
    # Re-sign the TBS part (fed on stdin) with actual_saltlen to create
    # the announced/actual mismatch.
    cmd = OPENSSL_RSA_PSS_DGST_COMMAND.format(**kwargs)
    #pylint: disable=unexpected-keyword-arg
    der_bytes = subprocess.check_output(cmd,
                                        input=target_certificate['tbs_certificate'].dump(),
                                        shell=True)
    with open(kwargs.get('output'), 'wb') as f:
        # Replace the signature with the mismatching one, then write PEM.
        target_certificate['signature_value'] = core.OctetBitString(der_bytes)
        f.write(pem.armor('CERTIFICATE', target_certificate.dump()))

if __name__ == '__main__':
    main()

View File

@@ -1,130 +0,0 @@
#!/usr/bin/env python3
"""Describe the test coverage of PSA functions in terms of return statuses.
1. Build Mbed TLS with -DRECORD_PSA_STATUS_COVERAGE_LOG
2. Run psa_collect_statuses.py
The output is a series of line of the form "psa_foo PSA_ERROR_XXX". Each
function/status combination appears only once.
This script must be run from the top of an Mbed TLS source tree.
The build command is "make -DRECORD_PSA_STATUS_COVERAGE_LOG", which is
only supported with make (as opposed to CMake or other build methods).
"""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
import argparse
import os
import subprocess
import sys
# Default location of the status log read by collect_status_logs().
DEFAULT_STATUS_LOG_FILE = 'tests/statuses.log'
# Default location of the psa_constant_names helper program.
DEFAULT_PSA_CONSTANT_NAMES = 'tf-psa-crypto/programs/psa/psa_constant_names'
class Statuses:
    """Information about observed return statuses of API functions."""

    def __init__(self):
        # Map function name -> status value (string) -> list of log line
        # tails (the part of each log line after "value:function:").
        self.functions = {}
        # All observed status values, as integers.
        self.codes = set()
        # Map status value (string) -> symbolic name, filled in by
        # get_constant_names().
        self.status_names = {}

    def collect_log(self, log_file_name):
        """Read logs from RECORD_PSA_STATUS_COVERAGE_LOG.

        Read logs produced by running Mbed TLS test suites built with
        -DRECORD_PSA_STATUS_COVERAGE_LOG.
        """
        with open(log_file_name) as log:
            for line in log:
                # Each line is "value:function:tail" (tail may contain ':').
                value, function, tail = line.split(':', 2)
                self.functions.setdefault(function, {}) \
                              .setdefault(value, []) \
                              .append(tail)
                self.codes.add(int(value))

    def get_constant_names(self, psa_constant_names):
        """Run psa_constant_names to obtain names for observed numerical values."""
        values = [str(value) for value in self.codes]
        cmd = [psa_constant_names, 'status'] + values
        output = subprocess.check_output(cmd).decode('ascii')
        # psa_constant_names prints one name per line, in argument order.
        for value, name in zip(values, output.rstrip().split('\n')):
            self.status_names[value] = name

    def report(self):
        """Report observed return values for each function.

        The report is a series of lines of the form "psa_foo PSA_ERROR_XXX".
        """
        for function in sorted(self.functions.keys()):
            fdata = self.functions[function]
            names = [self.status_names[value] for value in fdata.keys()]
            for name in sorted(names):
                sys.stdout.write('{} {}\n'.format(function, name))
def collect_status_logs(options):
    """Build and run unit tests and report observed function return statuses.

    Build Mbed TLS with -DRECORD_PSA_STATUS_COVERAGE_LOG, run the
    test suites and display information about observed return statuses.
    """
    rebuilt = False
    # Unless told to reuse an existing log, start from a clean slate.
    if not options.use_existing_log and os.path.exists(options.log_file):
        os.remove(options.log_file)
    if not os.path.exists(options.log_file):
        if options.clean_before:
            subprocess.check_call(['make', '-f', 'scripts/legacy.make', 'clean'],
                                  cwd='tests',
                                  stdout=sys.stderr)
        # "make -q" only checks whether the targets are up to date
        # (nonzero exit status means a rebuild is needed).
        with open(os.devnull, 'w') as devnull:
            make_q_ret = subprocess.call(['make', '-f', 'scripts/legacy.make',
                                          '-q', 'lib', 'tests'],
                                         stdout=devnull, stderr=devnull)
        if make_q_ret != 0:
            # Out of date: rebuild with status logging enabled.
            subprocess.check_call(['make', '-f', 'scripts/legacy.make',
                                   'RECORD_PSA_STATUS_COVERAGE_LOG=1'],
                                  stdout=sys.stderr)
            rebuilt = True
        # Run the test suites; the instrumented build writes the status log.
        subprocess.check_call(['make', '-f', 'scripts/legacy.make', 'test'],
                              stdout=sys.stderr)
    data = Statuses()
    data.collect_log(options.log_file)
    data.get_constant_names(options.psa_constant_names)
    # Only clean up a build that this function itself created.
    if rebuilt and options.clean_after:
        subprocess.check_call(['make', '-f', 'scripts/legacy.make', 'clean'],
                              cwd='tests',
                              stdout=sys.stderr)
    return data
def main():
    """Parse command-line options, collect status logs and print the report."""
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument('--clean-after', action='store_true',
                        help='Run "make clean" after rebuilding')
    parser.add_argument('--clean-before', action='store_true',
                        help='Run "make clean" before regenerating the log file)')
    log_help = 'Log file location (default: {})'.format(DEFAULT_STATUS_LOG_FILE)
    parser.add_argument('--log-file', metavar='FILE',
                        default=DEFAULT_STATUS_LOG_FILE,
                        help=log_help)
    names_help = ('Path to psa_constant_names (default: {})'
                  .format(DEFAULT_PSA_CONSTANT_NAMES))
    parser.add_argument('--psa-constant-names', metavar='PROGRAM',
                        default=DEFAULT_PSA_CONSTANT_NAMES,
                        help=names_help)
    parser.add_argument('--use-existing-log', '-e', action='store_true',
                        help='Don\'t regenerate the log file if it exists')
    collect_status_logs(parser.parse_args()).report()

if __name__ == '__main__':
    main()

View File

@@ -1,89 +0,0 @@
#!/bin/sh

# Print the usage message.
help () {
    cat <<EOF
Usage: $0 [OPTION] [PLATFORM]...
Run all the metatests whose platform matches any of the given PLATFORM.
A PLATFORM can contain shell wildcards.
Expected output: a lot of scary-looking error messages, since each
metatest is expected to report a failure. The final line should be
"Ran N metatests, all good."
If something goes wrong: the final line should be
"Ran N metatests, X unexpected successes". Look for "Unexpected success"
in the logs above.
-l List the available metatests, don't run them.
EOF
}

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

set -e -u

# Locate the metatest program: this script may be run from the project
# root or from one or two directory levels below it.
if [ -d programs ]; then
    METATEST_PROGRAM=programs/test/metatest
elif [ -d ../programs ]; then
    METATEST_PROGRAM=../programs/test/metatest
elif [ -d ../../programs ]; then
    METATEST_PROGRAM=../../programs/test/metatest
else
    echo >&2 "$0: FATAL: programs/test/metatest not found"
    exit 120
fi

LIST_ONLY=
while getopts hl OPTLET; do
    case $OPTLET in
        h) help; exit;;
        l) LIST_ONLY=1;;
        \?) help >&2; exit 120;;
    esac
done
shift $((OPTIND - 1))

# Read "name platform ..." lines on stdin and print each name whose
# platform matches at least one of the patterns given as arguments.
list_matches () {
    while read name platform junk; do
        for pattern in "$@"; do
            case $platform in
                $pattern) echo "$name"; break;;
            esac
        done
    done
}

count=0
errors=0

# Run one metatest. Metatests are expected to fail; an exit status of 0
# ("unexpected success") counts as an error.
run_metatest () {
    ret=0
    "$METATEST_PROGRAM" "$1" || ret=$?
    if [ $ret -eq 0 ]; then
        echo >&2 "$0: Unexpected success: $1"
        errors=$((errors + 1))
    fi
    count=$((count + 1))
}

# Don't pipe the output of metatest so that if it fails, this script exits
# immediately with a failure status.
full_list=$("$METATEST_PROGRAM" list)
matching_list=$(printf '%s\n' "$full_list" | list_matches "$@")

if [ -n "$LIST_ONLY" ]; then
    # Intentionally unquoted: print one name per line via word splitting.
    printf '%s\n' $matching_list
    exit
fi

# Intentionally unquoted: iterate over the whitespace-separated names.
for name in $matching_list; do
    run_metatest "$name"
done

if [ $errors -eq 0 ]; then
    echo "Ran $count metatests, all good."
    exit 0
else
    echo "Ran $count metatests, $errors unexpected successes."
    exit 1
fi

View File

@@ -1,65 +0,0 @@
#!/usr/bin/env python3
"""Run the Mbed TLS demo scripts.
"""
import argparse
import glob
import subprocess
import sys
def run_demo(demo, quiet=False):
    """Run the specified demo script. Return True if it succeeds."""
    extra_args = {}
    if quiet:
        # Discard the demo's output entirely in quiet mode.
        extra_args = {'stdout': subprocess.DEVNULL,
                      'stderr': subprocess.DEVNULL}
    return subprocess.call([demo], **extra_args) == 0
def run_demos(demos, quiet=False):
    """Run the specified demos and print summary information about failures.

    Return True if all demos passed and False if a demo fails.
    """
    failed = []
    for script in demos:
        if not quiet:
            print('#### {} ####'.format(script))
        ok = run_demo(script, quiet=quiet)
        if not ok:
            failed.append(script)
            if not quiet:
                print('{}: FAIL'.format(script))
        if quiet:
            # One status line per demo in quiet mode.
            print('{}: {}'.format(script, 'PASS' if ok else 'FAIL'))
        else:
            print('')
    print('{}/{} demos passed'.format(len(demos) - len(failed), len(demos)))
    if failed and not quiet:
        print('Failures:', *failed)
    return not failed
def run_all_demos(quiet=False):
    """Run all the available demos.

    Return True if all demos passed and False if a demo fails.
    """
    all_demos = (glob.glob('programs/*/*_demo.sh') +
                 glob.glob('tf-psa-crypto/programs/*/*_demo.sh'))
    if not all_demos:
        # Keep the message on one line. pylint: disable=line-too-long
        raise Exception('No demos found. run_demos needs to operate from the Mbed TLS toplevel directory.')
    return run_demos(all_demos, quiet=quiet)
def main():
    """Command-line entry point: run every demo, exit 1 on any failure."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--quiet', '-q',
                        action='store_true',
                        help="suppress the output of demos")
    args = parser.parse_args()
    sys.exit(0 if run_all_demos(quiet=args.quiet) else 1)

if __name__ == '__main__':
    main()

View File

@@ -1,175 +0,0 @@
#!/usr/bin/env python3
"""Test helper for the Mbed TLS configuration file tool
Run config.py with various parameters and write the results to files.
This is a harness to help regression testing, not a functional tester.
Sample usage:
test_config_script.py -d old
## Modify config.py and/or mbedtls_config.h ##
test_config_script.py -d new
diff -ru old new
"""
## Copyright The Mbed TLS Contributors
## SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
##
import argparse
import glob
import os
import re
import shutil
import subprocess
# Common prefix of every file this harness writes.
OUTPUT_FILE_PREFIX = 'config-'

def output_file_name(directory, stem, extension):
    """Return the path of the output file for *stem* with *extension*."""
    basename = '{}{}.{}'.format(OUTPUT_FILE_PREFIX, stem, extension)
    return os.path.join(directory, basename)
def cleanup_directory(directory):
    """Remove old output files (config-*.{h,out,err,status}) in *directory*."""
    # run_one() writes files with exactly these extensions; remove them
    # all so stale results from a previous run cannot be mistaken for
    # current ones. (The previous version iterated over an empty list,
    # which made this function a silent no-op.)
    for extension in ['h', 'out', 'err', 'status']:
        pattern = output_file_name(directory, '*', extension)
        filenames = glob.glob(pattern)
        for filename in filenames:
            os.remove(filename)
def prepare_directory(directory):
    """Create the output directory if it doesn't exist yet.

    If there are old output files, remove them.
    """
    if not os.path.exists(directory):
        os.makedirs(directory)
    else:
        cleanup_directory(directory)
def guess_presets_from_help(help_text):
    """Figure out what presets the script supports.

    help_text should be the output from running the script with --help.
    """
    # config.py advertises its commands as "{cmd1,cmd2,...}"; the preset
    # names are whatever is left after removing the generic commands.
    for hit in re.findall(r'\{([-\w,]+)\}', help_text):
        candidates = set(hit.split(','))
        if candidates.issuperset({'get', 'set', 'unset'}):
            return candidates - {'get', 'set', 'unset'}
    # config.pl lists its commands as indented "name - description" lines.
    legacy_hits = re.findall(r'\n +([-\w]+) +- ', help_text)
    if legacy_hits:
        return legacy_hits
    raise Exception("Unable to figure out supported presets. Pass the '-p' option.")
def list_presets(options):
    """Return the list of presets to test.

    The list is taken from the command line if present, otherwise it is
    extracted from running the config script with --help.
    """
    if options.presets:
        return re.split(r'[ ,]+', options.presets)
    # config.pl --help returns 255, so don't check the exit status.
    proc = subprocess.run([options.script, '--help'],
                          check=False,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
    return guess_presets_from_help(proc.stdout.decode('ascii'))
def run_one(options, args, stem_prefix='', input_file=None):
    """Run the config script with the given arguments.

    Take the original content from input_file if specified, defaulting
    to options.input_file if input_file is None.

    Write the following files, where xxx contains stem_prefix followed by
    a filename-friendly encoding of args:
    * config-xxx.h: modified file.
    * config-xxx.out: standard output.
    * config-xxx.err: standard error.
    * config-xxx.status: exit code.

    Return ("xxx+", "path/to/config-xxx.h") which can be used as
    stem_prefix and input_file to call this function again with new args.
    """
    if input_file is None:
        input_file = options.input_file
    stem = stem_prefix + '-'.join(args)
    data_filename = output_file_name(options.output_directory, stem, 'h')
    stdout_filename = output_file_name(options.output_directory, stem, 'out')
    stderr_filename = output_file_name(options.output_directory, stem, 'err')
    status_filename = output_file_name(options.output_directory, stem, 'status')
    # The script modifies its config file in place, so work on a copy.
    shutil.copy(input_file, data_filename)
    # Pass only the file basename, not the full path, to avoid getting the
    # directory name in error messages, which would make comparisons
    # between output directories more difficult.
    cmd = [os.path.abspath(options.script),
           '-f', os.path.basename(data_filename)]
    with open(stdout_filename, 'wb') as out:
        with open(stderr_filename, 'wb') as err:
            # stdin is closed off so an interactive prompt cannot hang
            # the harness.
            status = subprocess.call(cmd + args,
                                     cwd=options.output_directory,
                                     stdin=subprocess.DEVNULL,
                                     stdout=out, stderr=err)
    with open(status_filename, 'w') as status_file:
        status_file.write('{}\n'.format(status))
    return stem + "+", data_filename
### A list of symbols to test with.
### This script currently tests what happens when you change a symbol from
### having a value to not having a value or vice versa. This is not
### necessarily useful behavior, and we may not consider it a bug if
### config.py stops handling that case correctly.
### Each entry is exercised with get/set/--force set/unset in run_all().
TEST_SYMBOLS = [
    'CUSTOM_SYMBOL', # does not exist
    'PSA_WANT_KEY_TYPE_AES', # set, no value
    'MBEDTLS_MPI_MAX_SIZE', # unset, has a value
    'MBEDTLS_NO_UDBL_DIVISION', # unset, in "System support"
    'MBEDTLS_PLATFORM_ZEROIZE_ALT', # unset, in "Customisation configuration options"
]
def run_all(options):
    """Run all the command lines to test."""
    def exercise(args, stem_prefix='', input_file=None):
        # Thin wrapper so the exercised command lines read compactly.
        return run_one(options, args,
                       stem_prefix=stem_prefix, input_file=input_file)
    for preset in list_presets(options):
        exercise([preset])
    for symbol in TEST_SYMBOLS:
        exercise(['get', symbol])
        stem, filename = exercise(['set', symbol])
        exercise(['get', symbol], stem_prefix=stem, input_file=filename)
        exercise(['--force', 'set', symbol])
        stem, filename = exercise(['set', symbol, 'value'])
        exercise(['get', symbol], stem_prefix=stem, input_file=filename)
        exercise(['--force', 'set', symbol, 'value'])
        exercise(['unset', symbol])
def main():
    """Command line entry point."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('-d', metavar='DIR',
                            dest='output_directory', required=True,
                            help="""Output directory.""")
    arg_parser.add_argument('-f', metavar='FILE',
                            dest='input_file',
                            default='include/mbedtls/mbedtls_config.h',
                            help="""Config file (default: %(default)s).""")
    arg_parser.add_argument('-p', metavar='PRESET,...',
                            dest='presets',
                            help="""Presets to test (default: guessed from --help).""")
    arg_parser.add_argument('-s', metavar='FILE',
                            dest='script', default='scripts/config.py',
                            help="""Configuration script (default: %(default)s).""")
    options = arg_parser.parse_args()
    prepare_directory(options.output_directory)
    run_all(options)

if __name__ == '__main__':
    main()