pipeline:
  # Download the cache from S3
  restore-cache:
    image: plugins/s3-cache
    pull: true
    endpoint: https://s3.roosens.me
    access_key: ${S3_USER}
    secret_key: ${S3_PASSWORD}
    root: build-cache/
    restore: true
    secrets: [ s3_user, s3_password ]

  # =====LINTING=====
  lint-frontend:
    image: node:15-alpine3.13
    group: lint
    commands:
      - cd web
      - yarn run lint

  lint-backend:
    image: chewingbever/fej-builder:latest
    pull: true
    group: lint
    commands:
      - cargo fmt -- --check

  # =====BUILDING=====
  build-frontend:
    image: node:15-alpine3.13
    commands:
      - cd web
      - yarn install
      - yarn run build

  build-backend:
    image: chewingbever/fej-builder:latest
    commands:
      - cargo build

  # =====TESTING=====
  test-backend:
    image: chewingbever/fej-builder:latest
    commands:
      # This is run here because it requires compilation
      - cargo clippy -- --no-deps -D warnings
      - cargo test

  # =====REBUILD & FLUSH CACHE=====
  rebuild-cache:
    image: plugins/s3-cache
    endpoint: https://s3.roosens.me
    access_key: ${S3_USER}
    secret_key: ${S3_PASSWORD}
    root: build-cache/
    rebuild: true
    mount:
      - target
      - web/node_modules
    secrets: [ s3_user, s3_password ]
    # Push the cache, even on failure
    when:
      status: [ success, failure ]

  flush-cache:
    image: plugins/s3-cache
    endpoint: https://s3.roosens.me
    access_key: ${S3_USER}
    secret_key: ${S3_PASSWORD}
    root: build-cache/
    flush: true
    # Delete cache older than 30 days (might lower this)
    flush_age: 30
    secrets: [ s3_user, s3_password ]
    # Push the cache, even on failure
    when:
      status: [ success, failure ]

  # publish-builder:
  #   image: plugins/docker
  #   repo: chewingbever/fej-builder
  #   dockerfile: docker/Dockerfile.builder
  #   tag: [ latest ]
  #   secrets: [ docker_username, docker_password ]
  #   when:
  #     branch: develop
  #     event: push

# Backend cicd jobs are disabled until we can figure out a way to cache stuff
# test-backend:
#   image: chewingbever/fej-builder:latest
#   # Always update the builder image
#   pull: true
#   commands:
#     - cargo test

# TODO build dev & rel image, deploy these images

# branches: [ master, develop ]