
Commit 478b133

feat: integrate deployment to dev of new function
1 parent 619a388 commit 478b133

File tree: 5 files changed, +152 -24 lines changed
Lines changed: 125 additions & 0 deletions
@@ -0,0 +1,125 @@
name: Test & Deploy Evaluation Function to AWS Lambda

on:
  push:
    branches:
      - listing_question
  workflow_dispatch:

jobs:
  test:
    name: Test
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        python-version: [3.11]

    defaults:
      run:
        working-directory: app/

    env:
      REQUEST_SCHEMA_URL: https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/master/request.json
      RESPONSE_SCHEMA_URL: https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/master/responsev2.json

    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8 pytest
          python -m pip install -r requirements.txt
          # python -m nltk.downloader wordnet
          # python -m nltk.downloader word2vec_sample
          # python -m nltk.downloader brown
          # python -m nltk.downloader punkt
          # python -m nltk.downloader stopwords

      - name: Lint with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics

      - name: Test Evaluation Function
        run: |
          pytest -v evaluation_tests.py::TestEvaluationFunction

  deploy-dev:
    name: Deploy Dev
    needs: test
    runs-on: ubuntu-latest
    environment: production
    env:
      ECR_REPOSITORY: lambda-feedback-dev-functions-repository

    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set config.json output
        id: set_config_var
        run: |
          content=`cat ./config.json`
          # the following lines are only required for multi line json
          content="${content//'%'/'%25'}"
          content="${content//$'\n'/'%0A'}"
          content="${content//$'\r'/'%0D'}"
          # end of optional handling for multi line json
          echo "::set-output name=configJson::$content"

      - name: set Evaluation Function Name
        id: set_function_name
        run: |
          functionName="${{fromJson(steps.set_config_var.outputs.configJson).EvaluationFunctionName}}"
          [[ -z "$functionName" ]] && { echo "Add EvaluationFunctionName to config.json" ; exit 1; }
          echo "::set-output name=function_name::$functionName"

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.LAMBDA_CONTAINER_PIPELINE_AWS_ID }}
          aws-secret-access-key: ${{ secrets.LAMBDA_CONTAINER_PIPELINE_AWS_SECRET }}
          aws-region: eu-west-2

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1

      - name: Build, tag, and push image to Amazon ECR
        id: build-image
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          IMAGE_TAG: ${{ steps.set_function_name.outputs.function_name }}
        run: |
          # Build docker image from algorithm, schema and requirements
          docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG app/
          # Push image to ECR
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
          echo "::set-output name=image::$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG"

      - name: deploy evaluation function
        id: deploy-evaluation-function
        env:
          BACKEND_API_URL: https://dev-api.lambdafeedback.com
          API_KEY: ${{ secrets.FUNCTION_ADMIN_API_KEY }}
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          IMAGE_TAG: ${{ steps.set_function_name.outputs.function_name }}
        run: |
          curl --location --request POST "$BACKEND_API_URL/grading-function/ensure" \
            --header 'content-type: application/json' \
            --data-raw "{
              \"apiKey\": \"$API_KEY\",
              \"dockerImageUri\": \"$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG\",
              \"functionName\": \"$IMAGE_TAG\"
            }"

app/Dockerfile

Lines changed: 22 additions & 20 deletions
@@ -1,21 +1,23 @@
-# Base image that bundles AWS Lambda Python 3.8 image with some middleware functions
-# FROM base-eval-tmp
-FROM rabidsheep55/python-base-eval-layer
+FROM rabidsheep55/python-base-eval-layer
+# NOTE: this is platform "linux/amd64"
+# FROM ghcr.io/lambda-feedback/evaluation-function-base/python:3.11
+# NOTE: transformers from huggingface requires platform "linux/arm64"
+
 
 WORKDIR /app
 
-RUN mkdir /usr/share/nltk_data
-RUN mkdir -p /usr/share/nltk_data/corpora /usr/share/nltk_data/models /usr/share/nltk_data/tokenizers
+# RUN mkdir /usr/share/nltk_data
+# RUN mkdir -p /usr/share/nltk_data/corpora /usr/share/nltk_data/models /usr/share/nltk_data/tokenizers
 
-ARG NLTK_DATA=/usr/share/nltk_data
+# ARG NLTK_DATA=/usr/share/nltk_data
 
-ENV NLTK_DATA=/usr/share/nltk_data
+# ENV NLTK_DATA=/usr/share/nltk_data
 # Copy and install any packages/modules needed for your evaluation script.
 COPY requirements.txt .
-COPY brown_length .
-COPY word_freqs .
-COPY w2v .
-RUN yum install -y wget unzip
+# COPY brown_length .
+# COPY word_freqs .
+# COPY w2v .
+# RUN yum install -y wget unzip
 RUN pip3 install -r requirements.txt
 
 # # Download NLTK data files
@@ -40,17 +42,17 @@ RUN pip3 install -r requirements.txt
 # RUN rm /usr/share/nltk_data/tokenizers/*.zip
 
 # Warnings: those commands sometimes download corrupted zips, so it is better to wget each package from the main site
-RUN python -m nltk.downloader wordnet
-RUN python -m nltk.downloader word2vec_sample
-RUN python -m nltk.downloader brown
-RUN python -m nltk.downloader stopwords
-RUN python -m nltk.downloader punkt
-RUN python -m nltk.downloader punkt_tab
+# RUN python -m nltk.downloader wordnet
+# RUN python -m nltk.downloader word2vec_sample
+# RUN python -m nltk.downloader brown
+# RUN python -m nltk.downloader stopwords
+# RUN python -m nltk.downloader punkt
+# RUN python -m nltk.downloader punkt_tab
 
 # Copy the evaluation and testing scripts
-COPY brown_length ./app/
-COPY word_freqs ./app/
-COPY w2v ./app/
+# COPY brown_length ./app/
+# COPY word_freqs ./app/
+# COPY w2v ./app/
 COPY evaluation.py ./app/
 COPY evaluation_tests.py ./app/
 
app/evaluation.py

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ def setup_llm(config):
             max_tokens=config.max_new_token,
             openai_api_key=config.openai_api_key
         )
-    elif config.mode == 'llama3':
+    elif config.mode == 'llama3':  # NOTE: langchain_huggingface expected "linux/arm64"
         from langchain_huggingface import HuggingFaceEndpoint
         return HuggingFaceEndpoint(
             endpoint_url=config.endpoint_3_1_8B,

app/requirements.txt

Lines changed: 2 additions & 1 deletion
@@ -11,4 +11,5 @@ certifi
 pandas
 langchain
 langchain-openai
-python-dotenv
+python-dotenv
+langchain_huggingface

config.json

Lines changed: 2 additions & 2 deletions
@@ -1,3 +1,3 @@
 {
-    "EvaluationFunctionName": "shortTextAnswer"
-}
+    "EvaluationFunctionName": "listingQuestion"
+}
