test: add pylint on our test
Given that our testsuite is relatively complex, it seems nice to
let pylint have an opinion about it :)

This commit adds the needed test and also fixes the various
issues it found.
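
The approach (shown in full in test/test_pylint.py below) mirrors the existing flake8 gate: a small pytest test that shells out to the linter over the test directory, so the check runs wherever the testsuite runs. A minimal sketch of that pattern, without the project's specific --disable flags:

    import pathlib
    import subprocess


    def test_lint_gate():
        # lint every Python file that sits next to this test file; the real
        # test_pylint.py also passes the project's --disable flags
        test_dir = pathlib.Path(__file__).parent
        subprocess.check_call(
            ["pylint", "--max-line-length=120"]
            + [str(p) for p in sorted(test_dir.glob("*.py"))])
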
mvo5 committed Aug 2, 2024
1 parent edff0bd commit 3ce465d
Showing 12 changed files with 76 additions and 58 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
@@ -79,7 +79,7 @@ jobs:
- name: Install test dependencies
run: |
sudo apt update
sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static
sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static pylint
- name: Diskspace (before)
run: |
df -h
2 changes: 1 addition & 1 deletion test/conftest.py
@@ -16,7 +16,7 @@ def force_aws_upload_fixture(request):

# see https://hackebrot.github.io/pytest-tricks/param_id_func/ and
# https://docs.pytest.org/en/7.1.x/reference/reference.html#pytest.hookspec.pytest_make_parametrize_id
def pytest_make_parametrize_id(config, val):
def pytest_make_parametrize_id(config, val): # pylint: disable=W0613
if isinstance(val, TestCase):
return f"{val}"
return None
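
For reference, W0613 is pylint's unused-argument check; the config parameter is required by the pytest hook signature but not used in the body. An equivalent spelling of the same suppression uses the symbolic message name, which reads a little clearer than the numeric code (a stylistic variation, not what this commit does):

    def pytest_make_parametrize_id(config, val):  # pylint: disable=unused-argument
        if isinstance(val, TestCase):
            return f"{val}"
        return None
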
1 change: 1 addition & 0 deletions test/requirements.txt
@@ -3,3 +3,4 @@ flake8==6.1.0
paramiko==2.12.0
boto3==1.33.13
qmp==1.1.0
pylint==3.2.5
19 changes: 10 additions & 9 deletions test/test_build.py
@@ -5,18 +5,17 @@
import random
import re
import shutil
import subprocess
import string
import subprocess
import tempfile
import uuid
from contextlib import contextmanager
from typing import NamedTuple

import pytest

# local test utils
import testutil
from containerbuild import build_container_fixture # noqa: F401
from containerbuild import build_container_fixture # pylint: disable=unused-import
from testcases import CLOUD_BOOT_IMAGE_TYPES, DISK_IMAGE_TYPES, gen_testcases
from vm import AWS, QEMU

@@ -46,8 +45,8 @@ class ImageBuildResult(NamedTuple):
metadata: dict = {}


@pytest.fixture(scope='session')
def shared_tmpdir(tmpdir_factory):
@pytest.fixture(name="shared_tmpdir", scope='session')
def shared_tmpdir_fixture(tmpdir_factory):
tmp_path = pathlib.Path(tmpdir_factory.mktemp("shared"))
yield tmp_path

@@ -89,6 +88,8 @@ def images_fixture(shared_tmpdir, build_container, request, force_aws_upload):
yield build_results


# XXX: refactor
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
@contextmanager
def build_images(shared_tmpdir, build_container, request, force_aws_upload):
"""
@@ -391,8 +392,8 @@ def test_ami_boots_in_aws(image_type, force_aws_upload):


def log_has_osbuild_selinux_denials(log):
OSBUID_SELINUX_DENIALS_RE = re.compile(r"(?ms)avc:\ +denied.*osbuild")
return re.search(OSBUID_SELINUX_DENIALS_RE, log)
osbuid_selinux_denials_re = re.compile(r"(?ms)avc:\ +denied.*osbuild")
return re.search(osbuid_selinux_denials_re, log)


def parse_ami_id_from_log(log_output):
@@ -420,7 +421,7 @@ def test_osbuild_selinux_denials_re_works():


def has_selinux():
return testutil.has_executable("selinuxenabled") and subprocess.run("selinuxenabled").returncode == 0
return testutil.has_executable("selinuxenabled") and subprocess.run("selinuxenabled", check=False).returncode == 0


@pytest.mark.skipif(not has_selinux(), reason="selinux not enabled")
@@ -437,7 +438,7 @@ def test_image_build_without_se_linux_denials(image_type):
def test_iso_installs(image_type):
installer_iso_path = image_type.img_path
test_disk_path = installer_iso_path.with_name("test-disk.img")
with open(test_disk_path, "w") as fp:
with open(test_disk_path, "w", encoding="utf8") as fp:
fp.truncate(10_1000_1000_1000)
# install to test disk
with QEMU(test_disk_path, cdrom=installer_iso_path) as vm:
2 changes: 1 addition & 1 deletion test/test_flake8.py
@@ -7,5 +7,5 @@ def test_flake8():
p = pathlib.Path(__file__).parent
# TODO: use all static checks from osbuild instead
subprocess.check_call(
["flake8", "--ignore=E402", "--max-line-length=120",
["flake8", "--ignore=E402,F811,F401", "--max-line-length=120",
os.fspath(p)])
8 changes: 4 additions & 4 deletions test/test_manifest.py
@@ -7,9 +7,9 @@
import textwrap

import pytest

import testutil
from containerbuild import build_container_fixture # noqa: F401

from containerbuild import build_container_fixture # pylint: disable=unused-import
from containerbuild import make_container
from testcases import gen_testcases

@@ -291,7 +291,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container):
# no need to parameterize this test, toml is the same for all containers
container_ref = "quay.io/centos-bootc/centos-bootc:stream9"

CFG = {
cfg = {
"blueprint": {
"customizations": {
"filesystem": [
@@ -315,7 +315,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container):
output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output"
output_path.mkdir(exist_ok=True)
config_json_path = output_path / "config.json"
config_json_path.write_text(json.dumps(CFG), encoding="utf-8")
config_json_path.write_text(json.dumps(cfg), encoding="utf-8")

with pytest.raises(subprocess.CalledProcessError) as exc:
subprocess.check_output([
4 changes: 2 additions & 2 deletions test/test_opts.py
@@ -3,8 +3,8 @@
import subprocess

import pytest

from containerbuild import build_container_fixture, build_fake_container_fixture # noqa: F401
# pylint: disable=unused-import
from containerbuild import build_container_fixture, build_fake_container_fixture


@pytest.fixture(name="container_storage", scope="session")
18 changes: 18 additions & 0 deletions test/test_pylint.py
@@ -0,0 +1,18 @@
import pathlib
import subprocess


def test_pylint():
p = pathlib.Path(__file__).parent
subprocess.check_call(
["pylint",
"--disable=fixme",
"--disable=missing-class-docstring",
"--disable=missing-module-docstring",
"--disable=missing-function-docstring",
"--disable=too-many-instance-attributes",
# false positive because of "if yield else yield" in
# the "build_container" fixture, see
# https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/contextmanager-generator-missing-cleanup.html
"--disable=contextmanager-generator-missing-cleanup",
"--max-line-length=120"] + list(p.glob("*.py")))
16 changes: 8 additions & 8 deletions test/testcases.py
@@ -56,17 +56,17 @@ class TestCaseCentos(TestCase):
"quay.io/centos-bootc/centos-bootc:stream9")


def gen_testcases(what):
def gen_testcases(what): # pylint: disable=too-many-return-statements
if what == "manifest":
return [TestCaseCentos(), TestCaseFedora()]
elif what == "default-rootfs":
if what == "default-rootfs":
# Fedora doesn't have a default rootfs
return [TestCaseCentos()]
elif what == "ami-boot":
if what == "ami-boot":
return [TestCaseCentos(image="ami"), TestCaseFedora(image="ami")]
elif what == "anaconda-iso":
if what == "anaconda-iso":
return [TestCaseCentos(image="anaconda-iso"), TestCaseFedora(image="anaconda-iso")]
elif what == "qemu-boot":
if what == "qemu-boot":
test_cases = [
klass(image=img)
for klass in (TestCaseCentos, TestCaseFedora)
@@ -81,13 +81,13 @@ def gen_testcases(what):
# TODO: add arm64->x86_64 cross build test too
pass
return test_cases
elif what == "all":
if what == "all":
return [
klass(image=img)
for klass in (TestCaseCentos, TestCaseFedora)
for img in ("ami", "anaconda-iso", "qcow2", "raw", "vmdk")
]
elif what == "multidisk":
if what == "multidisk":
# single test that specifies all image types
image = "+".join(DISK_IMAGE_TYPES)
return [
@@ -96,7 +96,7 @@ def gen_testcases(what):
]
# Smoke test that all supported --target-arch architecture can
# create a manifest
elif what == "target-arch-smoke":
if what == "target-arch-smoke":
return [
TestCaseCentos(target_arch="arm64"),
# TODO: merge with TestCaseFedora once the arches are build there
11 changes: 5 additions & 6 deletions test/testutil.py
@@ -22,7 +22,7 @@ def run_journalctl(*args):

def journal_cursor():
output = run_journalctl("-n0", "--show-cursor")
cursor = output.split("\n")[-1]
cursor = output.rsplit("\n", maxsplit=1)[-1]
return cursor.split("cursor: ")[-1]


@@ -43,7 +43,7 @@ def get_free_port() -> int:


def wait_ssh_ready(address, port, sleep, max_wait_sec):
for i in range(int(max_wait_sec / sleep)):
for _ in range(int(max_wait_sec / sleep)):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(sleep)
try:
@@ -61,7 +61,7 @@ def has_x86_64_v3_cpu():
# x86_64-v3 has multiple features, see
# https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
# but "avx2" is probably a good enough proxy
return " avx2 " in pathlib.Path("/proc/cpuinfo").read_text()
return " avx2 " in pathlib.Path("/proc/cpuinfo").read_text("utf8")


def can_start_rootful_containers():
@@ -70,16 +70,15 @@ def can_start_rootful_containers():
# on linux we need to run "podman" with sudo to get full
# root containers
return os.getuid() == 0
elif system == "Darwin":
if system == "Darwin":
# on darwin a container is root if the podman machine runs
# in "rootful" mode, i.e. no need to run "podman" as root
# as it's just proxying to the VM
res = subprocess.run([
"podman", "machine", "inspect", "--format={{.Rootful}}",
], capture_output=True, encoding="utf8", check=True)
return res.stdout.strip() == "true"
else:
raise ValueError(f"unknown platform {system}")
raise ValueError(f"unknown platform {system}")


def write_aws_creds(path):
43 changes: 21 additions & 22 deletions test/testutil_test.py
@@ -4,13 +4,12 @@
from unittest.mock import call, patch

import pytest

from testutil import get_free_port, has_executable, wait_ssh_ready


def test_get_free_port():
port_nr = get_free_port()
assert port_nr > 1024 and port_nr < 65535
assert 1024 < port_nr < 65535


@pytest.fixture(name="free_port")
@@ -26,34 +25,34 @@ def test_wait_ssh_ready_sleeps_no_connection(mocked_sleep, free_port):


@pytest.mark.skipif(not has_executable("nc"), reason="needs nc")
def test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path):
def test_wait_ssh_ready_sleeps_wrong_reply(free_port):
with contextlib.ExitStack() as cm:
p = subprocess.Popen(
with subprocess.Popen(
f"echo not-ssh | nc -vv -l -p {free_port}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
)
cm.callback(p.kill)
# wait for nc to be ready
while True:
# netcat tranditional uses "listening", others "Listening"
# so just omit the first char
if "istening " in p.stdout.readline():
break
# now connect
with patch("time.sleep") as mocked_sleep:
with pytest.raises(ConnectionRefusedError):
wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=0.55)
assert mocked_sleep.call_args_list == [
call(0.1), call(0.1), call(0.1), call(0.1), call(0.1)]
) as p:
cm.callback(p.kill)
# wait for nc to be ready
while True:
# netcat tranditional uses "listening", others "Listening"
# so just omit the first char
if "istening " in p.stdout.readline():
break
# now connect
with patch("time.sleep") as mocked_sleep:
with pytest.raises(ConnectionRefusedError):
wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=0.55)
assert mocked_sleep.call_args_list == [
call(0.1), call(0.1), call(0.1), call(0.1), call(0.1)]


@pytest.mark.skipif(platform.system() == "Darwin", reason="hangs on macOS")
@pytest.mark.skipif(not has_executable("nc"), reason="needs nc")
def test_wait_ssh_ready_integration(free_port, tmp_path):
def test_wait_ssh_ready_integration(free_port):
with contextlib.ExitStack() as cm:
p = subprocess.Popen(f"echo OpenSSH | nc -l -p {free_port}", shell=True)
cm.callback(p.kill)
wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=10)
with subprocess.Popen(f"echo OpenSSH | nc -l -p {free_port}", shell=True) as p:
cm.callback(p.kill)
wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=10)
8 changes: 4 additions & 4 deletions test/vm.py
@@ -1,6 +1,5 @@
import abc
import os
import paramiko
import pathlib
import platform
import subprocess
@@ -10,9 +9,9 @@
from io import StringIO

import boto3
import paramiko
from botocore.exceptions import ClientError
from paramiko.client import AutoAddPolicy, SSHClient

from testutil import AWS_REGION, get_free_port, wait_ssh_ready


@@ -163,6 +162,7 @@ def start(self, wait_event="ssh", snapshot=True, use_ovmf=False):
self._address = "localhost"

# XXX: use systemd-run to ensure cleanup?
# pylint: disable=consider-using-with
self._qemu_p = subprocess.Popen(
self._gen_qemu_cmdline(snapshot, use_ovmf),
stdout=sys.stdout,
@@ -185,11 +185,11 @@ def _wait_qmp_socket(self, timeout_sec):
if os.path.exists(self._qmp_socket):
return True
time.sleep(1)
raise Exception(f"no {self._qmp_socket} after {timeout_sec} seconds")
raise TimeoutError(f"no {self._qmp_socket} after {timeout_sec} seconds")

def wait_qmp_event(self, qmp_event):
# import lazy to avoid requiring it for all operations
import qmp
import qmp # pylint: disable=import-outside-toplevel
self._wait_qmp_socket(30)
mon = qmp.QEMUMonitorProtocol(os.fspath(self._qmp_socket))
mon.connect()
