
Commit a1a0ebb

Merge branch 'listing_question' of https://github.com/lambda-feedback/shortTextAnswer into listing_question

2 parents bf99b45 + c1ee593

File tree: 6 files changed, +163 -32 lines
New GitHub Actions workflow file

Lines changed: 124 additions & 0 deletions
@@ -0,0 +1,124 @@
+name: Test & Deploy Evaluation Function to AWS Lambda
+
+on:
+  push:
+    branches:
+      - listing_question
+  workflow_dispatch:
+
+jobs:
+  test:
+    name: Test
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [3.11]
+
+    defaults:
+      run:
+        working-directory: app/
+
+    env:
+      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          python -m pip install flake8 pytest
+          python -m pip install -r requirements.txt
+          # python -m nltk.downloader wordnet
+          # python -m nltk.downloader word2vec_sample
+          # python -m nltk.downloader brown
+          # python -m nltk.downloader punkt
+          # python -m nltk.downloader stopwords
+
+      - name: Lint with flake8
+        run: |
+          # stop the build if there are Python syntax errors or undefined names
+          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+
+      - name: Test Evaluation Function
+        run: |
+          pytest -v evaluation_tests.py::TestEvaluationFunction
+
+  deploy-dev:
+    name: Deploy Dev
+    needs: test
+    runs-on: ubuntu-latest
+    environment: production
+    env:
+      ECR_REPOSITORY: lambda-feedback-dev-functions-repository
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Set config.json output
+        id: set_config_var
+        run: |
+          content=`cat ./config.json`
+          # the following lines are only required for multi line json
+          content="${content//'%'/'%25'}"
+          content="${content//$'\n'/'%0A'}"
+          content="${content//$'\r'/'%0D'}"
+          # end of optional handling for multi line json
+          echo "::set-output name=configJson::$content"
+
+      - name: set Evaluation Function Name
+        id: set_function_name
+        run: |
+          functionName="${{fromJson(steps.set_config_var.outputs.configJson).EvaluationFunctionName}}"
+          [[ -z "$functionName" ]] && { echo "Add EvaluationFunctionName to config.json" ; exit 1; }
+          echo "::set-output name=function_name::$functionName"
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.LAMBDA_CONTAINER_PIPELINE_AWS_ID }}
+          aws-secret-access-key: ${{ secrets.LAMBDA_CONTAINER_PIPELINE_AWS_SECRET }}
+          aws-region: eu-west-2
+
+      - name: Login to Amazon ECR
+        id: login-ecr
+        uses: aws-actions/amazon-ecr-login@v1
+
+      - name: Build, tag, and push image to Amazon ECR
+        id: build-image
+        env:
+          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
+          IMAGE_TAG: ${{ steps.set_function_name.outputs.function_name }}
+        run: |
+          # Build docker image from algorithm, schema and requirements
+          docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG app/
+          # Push image to ECR
+          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
+          echo "::set-output name=image::$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG"
+
+      - name: deploy evaluation function
+        id: deploy-evaluation-function
+        env:
+          BACKEND_API_URL: https://dev-api.lambdafeedback.com
+          API_KEY: ${{ secrets.FUNCTION_ADMIN_API_KEY }}
+          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
+          IMAGE_TAG: ${{ steps.set_function_name.outputs.function_name }}
+        run: |
+          curl --location --request POST "$BACKEND_API_URL/grading-function/ensure" \
+            --header 'content-type: application/json' \
+            --data-raw "{
+              \"apiKey\": \"$API_KEY\",
+              \"dockerImageUri\": \"$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG\",
+              \"functionName\": \"$IMAGE_TAG\"
+            }"

app/Dockerfile

Lines changed: 22 additions & 20 deletions
@@ -1,21 +1,23 @@
-# Base image that bundles AWS Lambda Python 3.8 image with some middleware functions
-# FROM base-eval-tmp
-FROM rabidsheep55/python-base-eval-layer
+FROM rabidsheep55/python-base-eval-layer
+# NOTE: this is platform "linux/amd64"
+# FROM ghcr.io/lambda-feedback/evaluation-function-base/python:3.11
+# NOTE: transformers from huggingface requires platform "linux/arm64"

 WORKDIR /app

-RUN mkdir /usr/share/nltk_data
-RUN mkdir -p /usr/share/nltk_data/corpora /usr/share/nltk_data/models /usr/share/nltk_data/tokenizers
+# RUN mkdir /usr/share/nltk_data
+# RUN mkdir -p /usr/share/nltk_data/corpora /usr/share/nltk_data/models /usr/share/nltk_data/tokenizers

-ARG NLTK_DATA=/usr/share/nltk_data
+# ARG NLTK_DATA=/usr/share/nltk_data

-ENV NLTK_DATA=/usr/share/nltk_data
+# ENV NLTK_DATA=/usr/share/nltk_data

 # Copy and install any packages/modules needed for your evaluation script.
 COPY requirements.txt .
-COPY brown_length .
-COPY word_freqs .
-COPY w2v .
-RUN yum install -y wget unzip
+# COPY brown_length .
+# COPY word_freqs .
+# COPY w2v .
+# RUN yum install -y wget unzip
 RUN pip3 install -r requirements.txt

 # # Download NLTK data files

@@ -40,17 +42,17 @@ RUN pip3 install -r requirements.txt
 # RUN rm /usr/share/nltk_data/tokenizers/*.zip

 # Warnings: those commands sometimes download corrupted zips, so it is better to wget each package from the main site
-RUN python -m nltk.downloader wordnet
-RUN python -m nltk.downloader word2vec_sample
-RUN python -m nltk.downloader brown
-RUN python -m nltk.downloader stopwords
-RUN python -m nltk.downloader punkt
-RUN python -m nltk.downloader punkt_tab
+# RUN python -m nltk.downloader wordnet
+# RUN python -m nltk.downloader word2vec_sample
+# RUN python -m nltk.downloader brown
+# RUN python -m nltk.downloader stopwords
+# RUN python -m nltk.downloader punkt
+# RUN python -m nltk.downloader punkt_tab

 # Copy the evaluation and testing scripts
-COPY brown_length ./app/
-COPY word_freqs ./app/
-COPY w2v ./app/
+# COPY brown_length ./app/
+# COPY word_freqs ./app/
+# COPY w2v ./app/
 COPY evaluation.py ./app/
 COPY evaluation_tests.py ./app/
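
The build-time NLTK downloads (and the bundled brown_length / word_freqs / w2v copies) are now commented out, so the image no longer ships any corpora. If they are ever needed again, one option, sketched below and not part of this commit, is to fetch them lazily at runtime into Lambda's writable /tmp directory; the package names match those previously downloaded in the Dockerfile.

# Hypothetical runtime fallback: download NLTK data into /tmp on first use
# (assumes /tmp is the only writable path inside the Lambda container).
import nltk

NLTK_DIR = "/tmp/nltk_data"
if NLTK_DIR not in nltk.data.path:
    nltk.data.path.append(NLTK_DIR)

# Resource paths and package names as previously used in the Dockerfile.
RESOURCES = [
    ("corpora/wordnet", "wordnet"),
    ("corpora/brown", "brown"),
    ("corpora/stopwords", "stopwords"),
    ("tokenizers/punkt", "punkt"),
]

for resource, package in RESOURCES:
    try:
        nltk.data.find(resource)                  # already available?
    except LookupError:
        nltk.download(package, download_dir=NLTK_DIR)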

app/evaluation.py

Lines changed: 8 additions & 8 deletions
@@ -54,14 +54,14 @@ def setup_llm(param):
             max_tokens=param.max_new_token,
             openai_api_key=param.openai_api_key
         )
-    elif param.mode == 'llama3':
-        from langchain_huggingface import HuggingFaceEndpoint
-        return HuggingFaceEndpoint(
-            endpoint_url=param.endpoint_3_1_8B,
-            max_new_tokens=param.max_new_token,
-            temperature=param.temperature,
-            huggingfacehub_api_token=param.huggingfacehub_api_token
-        )
+    # elif config.mode == 'llama3': # NOTE: langchain_huggingface expected "linux/arm64"
+    #     from langchain_huggingface import HuggingFaceEndpoint
+    #     return HuggingFaceEndpoint(
+    #         endpoint_url=config.endpoint_3_1_8B,
+    #         max_new_tokens=config.max_new_token,
+    #         temperature=config.temperature,
+    #         huggingfacehub_api_token=config.huggingfacehub_api_token
+    #     )


 def parse_last_boolean(response):
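
With the llama3 branch commented out, only the OpenAI path of setup_llm remains active. The sketch below is a hedged reconstruction of that surviving branch, not the file's actual contents: the context lines above confirm max_tokens=param.max_new_token and openai_api_key=param.openai_api_key, while the "openai" mode string and param.model_name are assumptions used here only for illustration.

# Hedged reconstruction of the active branch of setup_llm (param.model_name and
# the "openai" mode value are assumed; the other fields appear in the diff context).
from langchain_openai import ChatOpenAI


def setup_llm(param):
    if param.mode == "openai":
        return ChatOpenAI(
            model=param.model_name,               # assumption: model chosen via the params object
            temperature=param.temperature,
            max_tokens=param.max_new_token,
            openai_api_key=param.openai_api_key,
        )
    raise ValueError(f"Unsupported LLM mode: {param.mode}")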

app/evaluation_tests.py

Lines changed: 4 additions & 1 deletion
@@ -1,5 +1,8 @@
 import unittest
-from evaluation import evaluation_function, Param
+try:
+    from .evaluation import evaluation_function, Config
+except ImportError:
+    from evaluation import evaluation_function, Config

 class TestEvaluationFunction(unittest.TestCase):
     """

app/requirements.txt

Lines changed: 3 additions & 1 deletion
@@ -2,6 +2,7 @@ numpy
 nltk==3.8.1
 gensim
 matplotlib
+pytest

 # To run on cli: /Applications/Python\ 3.11/Install\ Certificates.command
 # If SSL cert fail on Mac -> command above calls pip install --upgrade certifi -> then calling nltk.download works

@@ -11,4 +12,5 @@ certifi
 pandas
 langchain
 langchain-openai
-python-dotenv
+python-dotenv
+# langchain_huggingface
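
The comments kept in requirements.txt refer to the macOS SSL failure that breaks nltk.download() until certifi is installed. A programmatic version of that certifi workaround, sketched below as an assumption rather than something this commit does, is to point OpenSSL at certifi's CA bundle before downloading:

# Sketch of the certifi workaround referenced in the requirements comments
# (assumption: run this before any nltk.download() call on an affected macOS setup).
import os

import certifi

# OpenSSL honours SSL_CERT_FILE when building its default verification paths,
# so the HTTPS download made by nltk.download will trust certifi's CA bundle.
os.environ["SSL_CERT_FILE"] = certifi.where()

import nltk

nltk.download("wordnet")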

config.json

Lines changed: 2 additions & 2 deletions
@@ -1,3 +1,3 @@
 {
-    "EvaluationFunctionName": "shortTextAnswer"
-}
+    "EvaluationFunctionName": "listingQuestion"
+}
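
The renamed EvaluationFunctionName is what the workflow's "set Evaluation Function Name" step extracts and validates before using it as the ECR image tag. A Python equivalent of that bash step (a sketch, not part of the commit) looks like this:

# Sketch: read config.json and fail, as the workflow does, when the
# EvaluationFunctionName key is missing or empty.
import json
import sys

with open("config.json") as fh:
    config = json.load(fh)

function_name = config.get("EvaluationFunctionName", "")
if not function_name:
    sys.exit("Add EvaluationFunctionName to config.json")

print(function_name)  # -> listingQuestion (used as the ECR image tag)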
