From 5ae32630da7f363cc73214297d544ad35824dc81 Mon Sep 17 00:00:00 2001
From: Franceso Bonanno
Date: Mon, 21 Feb 2022 19:49:34 +0100
Subject: [PATCH] Adding CI/CD and container building support

---
 ci/Containerfile |  31 +++++++++
 ci/README.md     |  37 ++++++++++
 ci/build.sh      | 176 +++++++++++++++++++++++++++++++++++++++++++++++
 ci/entrypoint.sh |  30 ++++++++
 4 files changed, 274 insertions(+)
 create mode 100644 ci/Containerfile
 create mode 100644 ci/README.md
 create mode 100755 ci/build.sh
 create mode 100755 ci/entrypoint.sh

diff --git a/ci/Containerfile b/ci/Containerfile
new file mode 100644
index 0000000..ba40c01
--- /dev/null
+++ b/ci/Containerfile
@@ -0,0 +1,31 @@
+ARG ARCH=
+FROM docker.io/$ARCH/debian:latest
+
+ARG DEBIAN_FRONTEND=noninteractive
+ENV ARCH=
+
+WORKDIR /src
+
+COPY entrypoint.sh /entrypoint.sh
+
+RUN apt update && apt -y full-upgrade && apt install -y --no-install-recommends \
+build-essential \
+autoconf \
+automake \
+git \
+gettext \
+gawk \
+sbcl \
+xdg-utils \
+libssl-dev \
+libncurses-dev \
+libsqlite3-dev \
+openssl \
+curl \
+ca-certificates \
+gpg \
+wget \
+unzip && apt clean
+
+ENTRYPOINT ["/entrypoint.sh"]
+
diff --git a/ci/README.md b/ci/README.md
new file mode 100644
index 0000000..11326e2
--- /dev/null
+++ b/ci/README.md
@@ -0,0 +1,37 @@
+## Build with containers
+
+The directory with the configs and the scripts to build with containers is named 'ci', as this is usually used for a Continuous Integration or Continuous Delivery (CI/CD) infrastructure.
+
+
+
+The bash script build.sh does all the needed work for you, but either podman or docker must be installed on your system and in your PATH. If both are installed, podman is preferred, as its default is to run rootless.
+
+
+You have to invoke the script as PATH/TO/build.sh ARCH, for example, from the ci dir, the invocation for host arch is:
+
+```bash
+./build.sh host
+```
+
+instead, from the source dir is:
+
+```bash
+ci/build.sh host
+```
+
+This is the list of the supported archs:
+
+- host (host architecture, autodetected)
+- amd64
+- i386
+- armhf
+- arm64
+- ppc64el
+
+For using foreign architectures (not host and supported natively, like i386 on amd64 or armhf on amd64), you must install qemu binfmt-support and qemu user static binaries first!
+Refer to your distribution's documentation on how to set up binfmt support.
+https://wiki.debian.org/QemuUserEmulation is valid for Debian, Ubuntu and derivatives, or https://github.com/multiarch/qemu-user-static for full road-to-containers solution.
+
+Currently, riscv64 and s390x are not supported because there is no sbcl for them in Debian (and the container uses Debian as base image), and ppc64el is supported as host only (for issues with qemu user binaries).
+
+The output of the build system (the tinmop binary) is under the out dir/ARCH/tinmop, and the default out dir is where there is the script, so in the case of the ci dir, ci/out/ARCH/tinmop.
diff --git a/ci/build.sh b/ci/build.sh
new file mode 100755
index 0000000..21972a0
--- /dev/null
+++ b/ci/build.sh
@@ -0,0 +1,177 @@
+#!/usr/bin/env bash
+
+set -e
+
+podman_bin=$(command -v podman)
+docker_bin=$(command -v docker)
+SCRIPT_DIR=$(cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd)
+SRC_DIR=$(cd "$SCRIPT_DIR/.." && pwd)
+OUTDIR="$SCRIPT_DIR/out"
+ARCH=$1
+
+mkdir -p "$OUTDIR"
+cd "$SCRIPT_DIR"
+
+build() {
+    [[ -z "$($CT_RUNTIME images -q tinmop_ci:$CT_TAG)" ]] || $CT_RUNTIME rmi tinmop_ci:$CT_TAG
+    $CT_RUNTIME build -t tinmop_ci:$CT_TAG -f Containerfile --build-arg ARCH="$CT_ARCH" .
+    $CT_RUNTIME run --rm -v "$SRC_DIR":/src/tinmop -v "$OUTDIR":/src/out --name tinmop_ci $($CT_RUNTIME image ls -q tinmop_ci:$CT_TAG)
+    $CT_RUNTIME rmi tinmop_ci:$CT_TAG
+}
+
+check_user_qemu() {
+    if [[ ! -f /proc/sys/fs/binfmt_misc/qemu-$USER_ARCH ]]
+    then
+        echo "You must have qemu user static for $USER_ARCH and binfmt support enabled for this arch!"
+        exit 1
+    fi
+}
+
+unsupported_arch() {
+    echo "Currently, you can not build for $ARCH via container.
+Exiting..."
+    exit 0
+}
+
+unsupported_user_arch() {
+    echo "Currently, you can not build for $ARCH via qemu user static binary for issues with it.
+Please try again, using an $ARCH host.
+Exiting..."
+    exit 0
+}
+
+if [[ -x "${podman_bin}" ]]
+then
+    echo 'Using podman...'
+    CT_RUNTIME=$podman_bin
+elif [[ -x "${docker_bin}" ]] && [[ ${EUID} -eq 0 ]]
+then
+    echo 'Using docker...'
+    CT_RUNTIME=$docker_bin
+elif [[ -x "${docker_bin}" ]] && [[ ${EUID} -ne 0 ]]
+then
+    echo "By default, docker do not run rootless.
+You need to run $0 as root or via sudo."
+    exit 1
+else
+    echo 'You need to install podman or docker first!
+See your distro docs to install one of them.'
+    exit 1
+fi
+
+# We are using Debian base image, so we can pass only valid archs from the base image
+# Mapping architecture names
+
+if [[ $ARCH == 'host' ]]
+then
+    if [[ $(uname -m) == 'x86_64' ]]
+    then
+        CT_ARCH='amd64'
+        CT_TAG='amd64'
+    elif [[ $(uname -m) == 'i386' ]]
+    then
+        CT_ARCH='i386'
+        CT_TAG='i386'
+    elif [[ $(uname -m) == 'aarch64' ]]
+    then
+        CT_ARCH='arm64v8'
+        CT_TAG='arm64'
+    elif [[ $(uname -m) == 'armv7l' ]]
+    then
+        CT_ARCH='arm32v7'
+        CT_TAG='armhf'
+    elif [[ $(uname -m) == 'ppc64le' ]]
+    then
+        CT_ARCH='ppc64le'
+        CT_TAG='ppc64el'
+    elif [[ $(uname -m) == 's390x' ]]
+    then
+        CT_ARCH='s390x'
+        CT_TAG='s390x'
+        unsupported_arch
+    elif [[ $(uname -m) == 'riscv64' ]]
+    then
+        CT_ARCH='riscv64'
+        CT_TAG='riscv64'
+        unsupported_arch
+    else
+        echo 'Sorry, your host architecture is not supported!
+Exiting...'
+        exit 1
+    fi
+elif [[ $ARCH == 'amd64' ]]
+then
+    CT_ARCH='amd64'
+    CT_TAG=$ARCH
+    if [[ $(uname -m) != 'x86_64' ]]
+    then
+        USER_ARCH='x86_64'
+        check_user_qemu
+    fi
+elif [[ $ARCH == 'i386' ]]
+then
+    CT_ARCH='i386'
+    CT_TAG=$ARCH
+    if [[ $(uname -m) != 'i386' ]] && [[ $(uname -m) != 'x86_64' ]]
+    then
+        USER_ARCH='i386'
+        check_user_qemu
+    fi
+elif [[ $ARCH == 'arm64' ]]
+then
+    CT_ARCH='arm64v8'
+    CT_TAG=$ARCH
+    if [[ $(uname -m) != 'aarch64' ]]
+    then
+        USER_ARCH='aarch64'
+        check_user_qemu
+    fi
+elif [[ $ARCH == 'armhf' ]]
+then
+    CT_ARCH='arm32v7'
+    CT_TAG=$ARCH
+    if [[ $(uname -m) != 'armv7l' ]] && [[ $(uname -m) != 'aarch64' ]]
+    then
+        USER_ARCH='arm'
+        check_user_qemu
+    fi
+elif [[ $ARCH == 'ppc64el' ]]
+then
+    CT_ARCH='ppc64le'
+    CT_TAG=$ARCH
+    unsupported_user_arch
+    if [[ $(uname -m) != 'ppc64le' ]]
+    then
+        USER_ARCH='ppc64le'
+        check_user_qemu
+    fi
+elif [[ $ARCH == 's390x' ]]
+then
+    CT_ARCH='s390x'
+    CT_TAG=$ARCH
+    unsupported_arch
+    if [[ $(uname -m) != 's390x' ]]
+    then
+        USER_ARCH='s390x'
+        check_user_qemu
+    fi
+elif [[ $ARCH == 'riscv64' ]]
+then
+    CT_ARCH='riscv64'
+    CT_TAG=$ARCH
+    unsupported_arch
+    if [[ $(uname -m) != 'riscv64' ]]
+    then
+        USER_ARCH='riscv64'
+        check_user_qemu
+    fi
+else
+    echo 'Please specify a valid architecture!'
+    exit 1
+fi
+
+echo "Building for $ARCH architecture..."
+build
+echo "All done! You can find executable at:
+$SCRIPT_DIR/out/$ARCH/tinmop"
+exit 0
diff --git a/ci/entrypoint.sh b/ci/entrypoint.sh
new file mode 100755
index 0000000..f9e8879
--- /dev/null
+++ b/ci/entrypoint.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -e
+
+if [[ ! -d /src/tinmop ]]; then
+    echo 'Please mount source at /src/tinmop'
+    exit 1
+elif [[ ! -d /src/out ]]; then
+    echo 'Please mount output dir'
+    exit 1
+fi
+
+cd /src/tinmop
+rm -f /src/out/"$(dpkg-architecture -qDEB_HOST_ARCH)"/tinmop
+if [[ -e Makefile ]]; then make distclean; fi
+./configure
+bash quick_quicklisp.sh --do-not-prompt
+make -j"$(nproc)"
+
+# Do tests
+make install
+echo "gemini://medusae.space/index.gmi?about" | ./tinmop -e scripts/gemget.lisp
+
+# Copy binary to out dir
+mkdir -p /src/out/"$(dpkg-architecture -qDEB_HOST_ARCH)"
+mv /src/tinmop/tinmop /src/out/"$(dpkg-architecture -qDEB_HOST_ARCH)"/
+
+# Clean and exit
+if [[ -e Makefile ]]; then make distclean; fi
+exit 0