Skip to content

[release] 0.0.1-beta.7 - Allow metadata on Eval() scores (#21) #2

[release] 0.0.1-beta.7 - Allow metadata on Eval() scores (#21)

[release] 0.0.1-beta.7 - Allow metadata on Eval() scores (#21) #2

Workflow file for this run

# CI workflow for the SDK: runs the core test suite on every push/PR to main,
# then runs the AI tests (which additionally need an OpenAI API key) only
# after the core suite passes.
name: Test

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test-core:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    steps:
      # NOTE(review): actions/checkout@v2 and actions/setup-python@v2 run on a
      # deprecated Node runtime on GitHub-hosted runners — consider bumping to
      # @v4; kept as-is here to avoid a behavioral change.
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: "3.9"  # quoted so YAML does not read it as float 3.9
      - name: Upgrade pip
        run: python -m pip install --upgrade pip
      - name: Install Deps
        run: |
          pip install --upgrade -r requirements.txt
          pip install --upgrade .[test]
      - name: Run Core Tests
        # AI tests are excluded here; they run in the dependent test-ai job.
        run: pytest tests/ --ignore=tests/ai_tests/ -q
        env:
          test_api_key: ${{ secrets.SDK_CONSISTENCY_TEST_COMPANY_API_KEY }}
          test_client_key: ${{ secrets.KONG_CLIENT_SDK_KEY }}

  test-ai:
    # Gate the (paid, external-API) AI tests on the core suite passing first.
    needs: test-core
    runs-on: ubuntu-latest
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: "3.9"
      - name: Upgrade pip
        run: python -m pip install --upgrade pip
      - name: Install Deps
        run: |
          pip install --upgrade -r requirements.txt
          pip install --upgrade .[test]
      - name: Run AI Tests
        run: pytest tests/ai_tests/ -q
        env:
          test_api_key: ${{ secrets.SDK_CONSISTENCY_TEST_COMPANY_API_KEY }}
          test_client_key: ${{ secrets.KONG_CLIENT_SDK_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_AI_SDK_TEST_API_KEY }}