Use GGUF to store model weights #172

Workflow file for this run

name: CI
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
jobs:
  basic:
    name: Linux/macOS (${{ matrix.os }})
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: ["macos-latest", "ubuntu-latest"]
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - uses: mamba-org/provision-with-micromamba@main
        with:
          environment-file: environment.yml
          extra-specs: |
            sel(osx): gfortran=11.3.0
      - uses: hendrikmuhs/ccache-action@main
        with:
          variant: sccache
          key: ${{ github.job }}-${{ matrix.os }}
      - name: Install GGUF
        shell: bash -e -x -l {0}
        run: |
          git clone https://github.com/ggerganov/llama.cpp
          cd llama.cpp
          git checkout 4e9a7f7f7fb6acbddd1462909c8d696e38edbfcc
          cd gguf-py
          pip install .
          cd ../..
      - name: Build and run
        shell: bash -l {0}
        run: |
          ci/build.sh
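
For reference, the gguf-py package installed in the "Install GGUF" step exposes a small Python API for writing GGUF files. A minimal sketch of storing model weights with it might look like the following; the tensor name, shape, architecture string, and output path here are placeholders, not values taken from this repository:

import numpy as np
import gguf

# Create a writer for an output file; the second argument is the
# architecture name stored in the file's metadata (placeholder here).
writer = gguf.GGUFWriter("model.gguf", "example-arch")

# Register a weight tensor to be stored in the file (placeholder data).
writer.add_tensor("layer0.weight", np.zeros((16, 16), dtype=np.float32))

# Write the header, key-value metadata, and tensor data, then close.
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.write_tensors_to_file()
writer.close()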