From cec1d9b74d1cc6adbeda9b2e8f223e10d938b66c Mon Sep 17 00:00:00 2001
From: nsavinda
Date: Wed, 16 Oct 2024 11:18:02 +0530
Subject: [PATCH 1/2] feat: grading API

---
 app/__init__.py          |  4 ++++
 app/core/log.py          |  2 +-
 app/data/gradePrompts.py | 23 +++++++++++++++
 app/routers/grading.py   | 28 ++++++++++++++++++++
 app/services/generate.py | 16 ++++++--------
 app/services/grade.py    | 50 ++++++++++++++++++++++++++++++++++
 6 files changed, 114 insertions(+), 9 deletions(-)
 create mode 100644 app/data/gradePrompts.py
 create mode 100644 app/routers/grading.py
 create mode 100644 app/services/grade.py

diff --git a/app/__init__.py b/app/__init__.py
index 4be4e47..69b97bb 100644
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -3,9 +3,13 @@
 from .routers.upload import router as upload_router
 from .routers.questionGenerate import router as questionGenerate_router
 
+from .routers.grading import router as grade_router
+
 from .main import app
 
 # Include routers with appropriate API version prefix
 app.include_router(upload_router, prefix="/api/v1")
 app.include_router(questionGenerate_router, prefix="/api/v1")
+app.include_router(grade_router, prefix="/api/v1")
+
 
diff --git a/app/core/log.py b/app/core/log.py
index 23e4f21..d7a9351 100644
--- a/app/core/log.py
+++ b/app/core/log.py
@@ -37,7 +37,7 @@
 # logger = setup_logger()
 
 def setup_logger():
-    logmode = os.getenv("LOG_MODE", "logstash").lower()
+    logmode = os.getenv("LOG_MODE", "console").lower()
 
     if logmode == "console":
         logging.basicConfig(level=logging.INFO)
diff --git a/app/data/gradePrompts.py b/app/data/gradePrompts.py
new file mode 100644
index 0000000..c1df8b5
--- /dev/null
+++ b/app/data/gradePrompts.py
@@ -0,0 +1,23 @@
+from typing import List
+from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.output_parsers import JsonOutputParser
+
+
+
+class GradePromptParser(BaseModel):
+    correct_points: List[str] = Field(description="The points in the answer that are correct.")
+    incorrect_points: List[str] = Field(description="The points in the answer that are incorrect.")
+
+
+
+
+def grade_prompt(question: str, answer: str, valid_points: str) -> tuple[str, JsonOutputParser]:
+    """
+    Generates a prompt for grading an answer, along with a JSON output parser.
+
+    Returns:
+        tuple[str, JsonOutputParser]: A tuple containing the prompt text and the JSON output parser.
+    """
+    prompt_text = f"Return the correct and incorrect points for the answer to the question: {question} with the answer: {answer} and valid points: {valid_points}"
+    parser = JsonOutputParser(pydantic_object=GradePromptParser)
+    return (prompt_text, parser)
diff --git a/app/routers/grading.py b/app/routers/grading.py
new file mode 100644
index 0000000..364a8f1
--- /dev/null
+++ b/app/routers/grading.py
@@ -0,0 +1,28 @@
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel
+
+from ..services.grade import grade
+
+from ..core.log import logger
+
+class GradeRequest(BaseModel):
+    question: str
+    answer: str
+    valid_points: list[str]
+
+
+
+router = APIRouter()
+
+@router.post("/grade/", response_model=dict)
+async def grade_answer(request: GradeRequest) -> dict:
+    """Endpoint to grade an answer against its valid points."""
+    try:
+        logger.info(f"Received grading request with valid points: {request.valid_points}")
+        logger.info("Sending the answer to the grading service")
+        grade_result = grade(request.question, request.answer, request.valid_points)
+        return grade_result
+    except Exception as e:
+        logger.error(f"An error occurred while grading the answer: {str(e)}")
+        raise HTTPException(status_code=500, detail=f"An error occurred while grading the answer: {str(e)}")
+
diff --git a/app/services/generate.py b/app/services/generate.py
index baaabc4..f8fbc4e 100644
--- a/app/services/generate.py
+++ b/app/services/generate.py
@@ -16,14 +16,14 @@
 
 dotenv.load_dotenv()
 
-class QuestionParser(BaseModel):
-    question: str = Field(description="The question generated from the text.")
-    answer: str = Field(description="The answer to the generated question.")
-
-class MultipleChoiceQuestionParser(BaseModel):
-    question: str = Field(description="The multiple choice question generated from the text.")
-    options: list[str] = Field(description="The options for the multiple choice question.")
-    answer: int = Field(description="The index of the correct answer in the options list.")
+# class QuestionParser(BaseModel):
+#     question: str = Field(description="The question generated from the text.")
+#     answer: str = Field(description="The answer to the generated question.")
+
+# class MultipleChoiceQuestionParser(BaseModel):
+#     question: str = Field(description="The multiple choice question generated from the text.")
+#     options: list[str] = Field(description="The options for the multiple choice question.")
+#     answer: int = Field(description="The index of the correct answer in the options list.")
 
 class QuestionGenerator:
     def __init__(self, examid: str):
diff --git a/app/services/grade.py b/app/services/grade.py
new file mode 100644
index 0000000..33fc114
--- /dev/null
+++ b/app/services/grade.py
@@ -0,0 +1,50 @@
+import os
+import dotenv
+from langchain_openai import ChatOpenAI
+from langchain.prompts import PromptTemplate
+
+from ..data import gradePrompts
+from ..core import log
+
+
+dotenv.load_dotenv()
+
+
+class Grader:
+    def __init__(self, question: str, answer: str, valid_points: list[str]):
+        self.question = question
+        self.answer = answer
+        self.valid_points = ", ".join(valid_points)
+
+        log.logger.debug(f"Valid points: {self.valid_points}")
+
+        log.logger.info("Grader initialized")
+        self.llm = ChatOpenAI(
+            model="gpt-4o",
+            api_key=os.getenv('OPENAI_API_KEY'),
+            model_kwargs={"response_format": {"type": "json_object"}}
+        )
+
+    def grade(self):
+        _, parser = gradePrompts.grade_prompt(self.question, self.answer, self.valid_points)
+
+        prompt_template = PromptTemplate(
+            template="Grade the answer to the question: {question} with the answer: {answer} and valid points: {valid_points}. Return the correct and incorrect points as a JSON object.\n{format_instructions}",
+            input_variables=["question", "answer", "valid_points"],
+            partial_variables={"format_instructions": parser.get_format_instructions()}
+
+        )
+
+        chain = prompt_template | self.llm | parser
+        log.logger.info("Grading the answer.")
+        result = chain.invoke({"question": self.question, "answer": self.answer, "valid_points": self.valid_points})
+
+        log.logger.info(f"Grading result: {result}")
+        return result
+
+
+
+# valid_points is a list of strings
+def grade(question: str, answer: str, valid_points: list[str]):
+    grader = Grader(question, answer, valid_points)
+    return grader.grade()
\ No newline at end of file

From 583b2c15a535ac3ffd30609bb9ae05956da33e5f Mon Sep 17 00:00:00 2001
From: nsavinda
Date: Wed, 16 Oct 2024 22:38:50 +0530
Subject: [PATCH 2/2] fix: change prompt

---
 app/services/grade.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/services/grade.py b/app/services/grade.py
index 33fc114..1328e59 100644
--- a/app/services/grade.py
+++ b/app/services/grade.py
@@ -29,7 +29,7 @@ def grade(self):
         _, parser = gradePrompts.grade_prompt(self.question, self.answer, self.valid_points)
 
         prompt_template = PromptTemplate(
-            template="Grade the answer to the question: {question} with the answer: {answer} and valid points: {valid_points}. Return the correct and incorrect points as a JSON object.\n{format_instructions}",
+            template="Grade the answer to the question: {question} with the answer: {answer} and valid points: {valid_points}. Return the correct and incorrect points (quote each point exactly as given and include only points that appear in the answer) as a JSON object.\n{format_instructions}",
             input_variables=["question", "answer", "valid_points"],
             partial_variables={"format_instructions": parser.get_format_instructions()}
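
For reviewers: below is a minimal sketch of how the new grading endpoint could be exercised once both patches are applied. It is illustrative only: the question, answer, and valid points are invented, and because the route ultimately calls gpt-4o, a real OPENAI_API_KEY must be present in the environment.

from fastapi.testclient import TestClient

# Importing app.main runs app/__init__.py first, which registers the routers.
from app.main import app

client = TestClient(app)

# Field names follow the GradeRequest model in app/routers/grading.py.
response = client.post("/api/v1/grade/", json={
    "question": "Why do the seasons change?",
    "answer": "Earth's tilted axis changes how much sunlight each hemisphere receives.",
    "valid_points": [
        "Earth's axis is tilted",
        "The tilt varies the sunlight reaching each hemisphere",
    ],
})

# The grading chain parses the model reply with GradePromptParser, so the body is
# expected to carry "correct_points" and "incorrect_points" lists, though the
# exact contents depend on the model output.
print(response.status_code, response.json())

TestClient drives the ASGI app in process, so no server has to be running; a plain requests.post against a deployed instance would look the same apart from the base URL.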