From e29fac3b9a01da16fa41fbdaf8e17e88fda0df5b Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Tue, 16 Sep 2025 15:44:27 -0700 Subject: [PATCH 01/54] Add README for LF LLM demo --- llm/README.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 llm/README.md diff --git a/llm/README.md b/llm/README.md new file mode 100644 index 0000000..3b7b658 --- /dev/null +++ b/llm/README.md @@ -0,0 +1,2 @@ +# LLM Demo + From 053b8d713906ee55d9535aab92ae4fdc34285329 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Tue, 16 Sep 2025 16:33:49 -0700 Subject: [PATCH 02/54] Adding work in progress code files for an llm example. Files: llm.py, which calls the llama-2-7b-chat model for simple question and answer, agent_llm.lf, which takes in the user input calls llm agent 1 and llm agent 2. --- llm/src/agent_llm.lf | 44 +++++++++++++++++++++ llm/src/llm.py | 94 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 138 insertions(+) create mode 100644 llm/src/agent_llm.lf create mode 100644 llm/src/llm.py diff --git a/llm/src/agent_llm.lf b/llm/src/agent_llm.lf new file mode 100644 index 0000000..e5c7f90 --- /dev/null +++ b/llm/src/agent_llm.lf @@ -0,0 +1,44 @@ +target Python{ + files: llm_textgeneration.py +}; + +preamble{= + from llm import agent1, agent2 + +=} + +reactor llm_a{ + + output user_in + reaction (startup)-> user_in{= + txt = input("Hey there!") + user_in.set(txt) + =} +} + +reactor llm_b{ + input llm_a_in + output llm_b_out + reaction (llm_a_in)-> llm_b_out{= + llm_b_out.set(llm_a_in.value) + =} +} + +main reactor{ + state response + user_response = new llm_a() + llm_response = new llm_b() + // call llm a to respond to user + reaction (user_response.user_in)->llm_response.llm_a_in{= + + response = agent1(user_response.user_in.value) + llm_response.llm_a_in.set(response) + =} + + //llm b to respond to what llm a generated + reaction (llm_response.llm_b_out){= + # llm_response.llm_a_in = response + agent2(llm_response.llm_b_out.value) + =} + +} \ No newline at end of file diff --git a/llm/src/llm.py b/llm/src/llm.py new file mode 100644 index 0000000..63b6234 --- /dev/null +++ b/llm/src/llm.py @@ -0,0 +1,94 @@ +### Import Libraries +import transformers +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig +from torch import cuda, bfloat16 + +### Add Your hugging face token here +hf_auth = "Add here" + +### Model to be chosen to act as an agent +model_id = "meta-llama/Llama-2-7b-chat-hf" + +### To check if there is GPU +has_cuda = torch.cuda.is_available() + +### To convert the model into 4bit quantization +bnb_config = None +if has_cuda: + try: + import bitsandbytes as bnb + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=torch.bfloat16, + ) + except Exception: + bnb_config = None + +### calling pre-trained tokenizer +tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) + + +### calling pre-trained model +model = AutoModelForCausalLM.from_pretrained( + model_id, + token=hf_auth, + device_map="auto" if has_cuda else None, + torch_dtype=torch.bfloat16 if has_cuda else torch.float32, + quantization_config=bnb_config, + low_cpu_mem_usage=True, +) + +model.eval() + +### agent 1 +def agent1(a): + user_query = a + + prompt = f"You are a helpful assistant.\n\n{user_query}\n" + + inputs = tokenizer(prompt, return_tensors="pt") + + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + + with torch.no_grad(): + 
outputs = model.generate( + **inputs, + max_new_tokens=100, + do_sample=True, + temperature=0.3, + ) + + gen_tokens = outputs[0] + prompt_len = inputs["input_ids"].shape[1] + response = tokenizer.decode(gen_tokens[prompt_len:], skip_special_tokens=True) + + print("LLM A response:", response) + return response + +### agent 2 +def agent2(b): + user_query = b + + prompt = f"Just summarize what the agent1 said: \n\n{user_query}\n\n" + + inputs = tokenizer(prompt, return_tensors="pt") + + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + + with torch.no_grad(): + outputs = model.generate( + **inputs, + max_new_tokens=100, + do_sample=True, + temperature=0.3, + ) + + gen_tokens = outputs[0] + prompt_len = inputs["input_ids"].shape[1] + response = tokenizer.decode(gen_tokens[prompt_len:], skip_special_tokens=True) + print("LLM B response:", response) \ No newline at end of file From 473c81f2978a7465e780c9e914c4ca869655d173 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Tue, 16 Sep 2025 16:37:32 -0700 Subject: [PATCH 03/54] changed the file name of the file to be included in agent_llm.lf --- llm/src/agent_llm.lf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/src/agent_llm.lf b/llm/src/agent_llm.lf index e5c7f90..35176fd 100644 --- a/llm/src/agent_llm.lf +++ b/llm/src/agent_llm.lf @@ -1,5 +1,5 @@ target Python{ - files: llm_textgeneration.py + files: llm.py }; preamble{= From 46522a14c9020089c117f67f39c5635c2e720bf0 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Fri, 19 Sep 2025 11:00:19 -0700 Subject: [PATCH 04/54] Added a quiz game. It is a game between two LLM models answering user questions and the model to respond the fastest wins --- llm/src/agent_llm.lf | 8 +- llm/src/llm.py | 114 +++++++++++----------- llm/src/llm_quiz_game.lf | 197 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 257 insertions(+), 62 deletions(-) create mode 100644 llm/src/llm_quiz_game.lf diff --git a/llm/src/agent_llm.lf b/llm/src/agent_llm.lf index 35176fd..5b5ab8e 100644 --- a/llm/src/agent_llm.lf +++ b/llm/src/agent_llm.lf @@ -7,7 +7,7 @@ preamble{= =} -reactor llm_a{ +reactor LLM_a{ output user_in reaction (startup)-> user_in{= @@ -16,7 +16,7 @@ reactor llm_a{ =} } -reactor llm_b{ +reactor LLM_b{ input llm_a_in output llm_b_out reaction (llm_a_in)-> llm_b_out{= @@ -26,8 +26,8 @@ reactor llm_b{ main reactor{ state response - user_response = new llm_a() - llm_response = new llm_b() + user_response = new LLM_a() + llm_response = new LLM_b() // call llm a to respond to user reaction (user_response.user_in)->llm_response.llm_a_in{= diff --git a/llm/src/llm.py b/llm/src/llm.py index 63b6234..93322f1 100644 --- a/llm/src/llm.py +++ b/llm/src/llm.py @@ -5,16 +5,19 @@ from torch import cuda, bfloat16 ### Add Your hugging face token here -hf_auth = "Add here" +hf_auth = "Add your token here" ### Model to be chosen to act as an agent model_id = "meta-llama/Llama-2-7b-chat-hf" +model_id_2 = "meta-llama/Llama-2-70b-chat-hf" -### To check if there is GPU +### To check if there is GPU and convert it into float 16 has_cuda = torch.cuda.is_available() +dtype = torch.bfloat16 if has_cuda else torch.float32 ### To convert the model into 4bit quantization bnb_config = None +### if there is cuda then the model is converted to 4bit quantization if has_cuda: try: import bitsandbytes as bnb @@ -22,73 +25,68 @@ load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, - bnb_4bit_compute_dtype=torch.bfloat16, + bnb_4bit_compute_dtype=dtype, ) except Exception: 
bnb_config = None ### calling pre-trained tokenizer -tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) - - -### calling pre-trained model -model = AutoModelForCausalLM.from_pretrained( - model_id, - token=hf_auth, +tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, token=hf_auth, use_fast=True) +for tok in (tokenizer, tokenizer_2): + if tok.pad_token_id is None: + tok.pad_token = tok.eos_token + +### since both the models have same device map and using 4bit quantization for both +common = dict( device_map="auto" if has_cuda else None, - torch_dtype=torch.bfloat16 if has_cuda else torch.float32, - quantization_config=bnb_config, + dtype=dtype, low_cpu_mem_usage=True, ) +if bnb_config is not None: + common["quantization_config"] = bnb_config -model.eval() - -### agent 1 -def agent1(a): - user_query = a - - prompt = f"You are a helpful assistant.\n\n{user_query}\n" - +### calling pre-trained model +model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_auth, **common) +model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, token=hf_auth, **common) +model.eval(); model_2.eval() + + + +### arguments for both the models +GEN_A = dict(max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id) +GEN_B = dict(max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer_2.eos_token_id, pad_token_id=tokenizer_2.pad_token_id) + +###to resturn only one line answers +def postprocess(text: str) -> str: + t = text.strip() + for sep in ["\n", ". ", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + return t.strip().strip(":").strip() + +###Calling agent1 from .lf code +def agent1(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" inputs = tokenizer(prompt, return_tensors="pt") - - if has_cuda: - inputs = {k: v.to("cuda") for k, v in inputs.items()} - + if has_cuda: inputs = {k: v.to("cuda") for k, v in inputs.items()} with torch.no_grad(): - outputs = model.generate( - **inputs, - max_new_tokens=100, - do_sample=True, - temperature=0.3, - ) - - gen_tokens = outputs[0] + out = model.generate(**inputs, **GEN_A) prompt_len = inputs["input_ids"].shape[1] - response = tokenizer.decode(gen_tokens[prompt_len:], skip_special_tokens=True) - - print("LLM A response:", response) - return response - -### agent 2 -def agent2(b): - user_query = b - - prompt = f"Just summarize what the agent1 said: \n\n{user_query}\n\n" - - inputs = tokenizer(prompt, return_tensors="pt") - - if has_cuda: - inputs = {k: v.to("cuda") for k, v in inputs.items()} - + result = tokenizer.decode(out[0][prompt_len:], skip_special_tokens=True) + return postprocess(result) + +###Calling agent2 from .lf code +def agent2(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer_2(prompt, return_tensors="pt") + if has_cuda: inputs = {k: v.to("cuda") for k, v in inputs.items()} with torch.no_grad(): - outputs = model.generate( - **inputs, - max_new_tokens=100, - do_sample=True, - temperature=0.3, - ) - - gen_tokens = outputs[0] + out = model_2.generate(**inputs, **GEN_B) prompt_len = inputs["input_ids"].shape[1] - response = tokenizer.decode(gen_tokens[prompt_len:], skip_special_tokens=True) - print("LLM B response:", response) \ No newline at end of file + result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) + return postprocess(result) \ No 
newline at end of file diff --git a/llm/src/llm_quiz_game.lf b/llm/src/llm_quiz_game.lf new file mode 100644 index 0000000..85e89a4 --- /dev/null +++ b/llm/src/llm_quiz_game.lf @@ -0,0 +1,197 @@ +### llm.py file needs to be in the same directory +target Python { keepalive: true, files: ["llm.py"] } + +preamble {= + import threading + import time + from llm import agent1, agent2 # your Python functions + + def keyboard_prompt(reactor, action): + while True: + time.sleep(5) + action.schedule(None) +=} + +### Reactor for handling user keyboard input +reactor KeyboardInput { + state th + state terminate = False + state eof = False + state buffer = "" + + physical action line + output prompt + output quit + + reaction(startup) -> line {= + def reader(): + while not self.terminate: + + s = input("Enter the quiz question\n") + if s == "": + self.eof = True + line.schedule(0) + break + elif s.lower().strip() == "quit": + self.eof = True + line.schedule(0) + break + else: + self.buffer = s + line.schedule(1) + self.th = threading.Thread(target=reader, daemon=True) + self.th.start() + =} + + reaction(line) -> prompt, quit {= + if self.eof: + quit.set() + environment().sync_shutdown() + else: + prompt.set(self.buffer) + =} + + reaction(shutdown) {= + self.terminate = True + if self.th and self.th.is_alive(): + self.th.join() + =} +} + +### Reactor for calling agent 1 +reactor LlmA { + state th + state running = False + state out_buffer = "" + + input user_in + physical action done + output answer + + + reaction(user_in) -> done {= + if self.running: + return + self.running = True + query = user_in.value + def agentA(): + try: + self.out_buffer = agent1(query) + finally: + done.schedule(1) + self.th = threading.Thread(target=agentA, daemon=True) + self.th.start() + =} + + reaction(done) -> answer {= + self.running = False + answer.set(self.out_buffer) + =} +} + + +### Reactor for calling agent 2 +reactor LlmB { + state th + state running = False + state out_buffer = "" + input user_in + output answer + + physical action done + + reaction(user_in)->done{= + if self.running: + return + self.running = True + query = user_in.value + def agentB(): + try: + self.out_buffer = agent2(query) + finally: + done.schedule(1) + self.th = threading.Thread(target=agentB, daemon=True) + self.th.start() + =} + + reaction(done)->answer{= + self.running = False + answer.set(self.out_buffer) + =} + +} + +###Judge reactor to determine which agent responds first +reactor Judge{ + input query + input llma + input llmb + output ask + + state waiting = False + state logical_base_time = 0 + state physical_base_time = 0 + state winner = "" + + logical action timeout(60 sec) + + reaction(query) -> timeout, ask {= + self.waiting = True + self.winner = "" + self.logical_base_time = lf.time.logical_elapsed() + self.physical_base_time = lf.time.physical_elapsed() + timeout.schedule(0) + print(f"\n\n\nQuery: {query.value}\n") + print("waiting...\n") + ask.set(query.value) + =} + + reaction(llma) {= + if not self.waiting: + return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms") + print(f"{llma.value}") + =} + + reaction(llmb) {= + if not self.waiting: + return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = 
lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms") + print(f"{llmb.value}") + =} + + reaction(timeout) {= + if not self.waiting: + return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms") + =} +} + + +main reactor { + llma_response = new LlmA() + llmb_response = new LlmB() + keyboard = new KeyboardInput() + j = new Judge() + + keyboard.prompt -> j.query + j.ask -> llma_response.user_in + j.ask -> llmb_response.user_in + llma_response.answer -> j.llma + llmb_response.answer -> j.llmb +} \ No newline at end of file From 9d9ee262ac2adcc677d09d4ecac9e27669c2e864 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Fri, 19 Sep 2025 11:26:33 -0700 Subject: [PATCH 05/54] Updated the README.md for instructions to run the quiz game --- llm/README.md | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/llm/README.md b/llm/README.md index 3b7b658..c7b4000 100644 --- a/llm/README.md +++ b/llm/README.md @@ -1,2 +1,95 @@ # LLM Demo +# Overview +This is a quiz-style game between two LLM agents. For each user question typed at the keyboard, both agents answer in parallel. The Judge announces whichever answer arrives first (or a timeout if neither responds within 60 sec), and prints per-question elapsed logical and physical times. + +# Pre-requisites + +You need Python installed, as llm.py is written in Python. + +## Library Dependencies +To run this project, the following dependencies are required. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. +While newer versions of other dependencies may work, the specific versions listed below have been tested and are recommended for optimal performance. + +It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies. The available options for environment setup are listed below. + +``` +pip install accelerate +pip install transformers +pip install tokenizers +pip install bitsandbytes>=0.43.0 +pip install torch +pip install torchvision +``` + +## System Requirements + +To ensure optimal performance, the following hardware and software requirements are utilized. \ +**Note:** To replicate this model, you can use any equivalent hardware that meets the computational requirements. 
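+
+As a quick optional sanity check (a minimal sketch, not part of this repository), you can verify that PyTorch detects a CUDA GPU before launching the demo:
+
+```
+import torch
+
+# Prints whether a CUDA device is visible, and its name if so.
+print(torch.cuda.is_available())
+if torch.cuda.is_available():
+    print(torch.cuda.get_device_name(0))
+```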
+
+### Hardware Requirements
+- **GPU**: NVIDIA RTX A6000
+
+### Software Requirements
+- **Python** (Ensure Python is installed)
+- **CUDA Version**: 12.8
+- **NVIDIA-SMI**: For monitoring GPU performance and memory utilization
+
+### Model Dependencies
+- **Pre-trained Models**: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), [meta-llama/Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)
+
+**Note:** To access and use the pre-trained models, an authentication key must be obtained from the [Hugging Face token settings](https://huggingface.co/settings/tokens). Ensure you have a valid API token and configure authentication before running the demo.
+
+Make sure the environment is properly configured to use CUDA for optimal GPU acceleration.
+
+# Files and directories in this repository
+- **`llm.py`** - Contains the logic to load and call the LLM models from the Hugging Face pretrained hub.
+- **`llm_quiz_game.lf`** - Lingua Franca program that defines the quiz game reactors (keyboard input, LLM agents, and Judge).
+
+# Execution Workflow
+
+### Step 1: Compile **`llm_quiz_game.lf`**
+
+**Note:**
+- Ensure that you specify the correct file paths.
+
+Run the following command:
+
+```
+lfc src/llm_quiz_game.lf
+```
+
+### Step 2: Run the binary file and input the quiz question
+
+Run the following command:
+
+```
+./bin/llm_quiz_game
+```
+
+The program will prompt you to enter a quiz question from the keyboard.
+
+Example output printed on the terminal:
+
+```
+--------------------------------------------------
+---- System clock resolution: 1 nsec
+---- Start execution on Fri Sep 19 10:46:31 2025 ---- plus 772215861 nanoseconds
+Enter the quiz question
+What is the capital of South Korea?
+Query: What is the capital of South Korea?
+
+waiting...
+
+Winner: LLM-B | logical 1184 ms | physical 1184 ms
+Answer: Seoul.
+--------------------------------------------------
+```
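+
+For reference, the logical and physical times in the `Winner` line are measured by the `Judge` reactor and converted from Lingua Franca's nanosecond clocks to milliseconds, essentially as in this simplified excerpt:
+
+```
+# Simplified from the Judge reactor in llm_quiz_game.lf
+logical_ms = int((lf.time.logical_elapsed() - self.logical_base_time) / 1000000)
+physical_ms = int((lf.time.physical_elapsed() - self.physical_base_time) / 1000000)
+```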
+ +### Step 3: Monitoring GPU Performance (Optional) +In another terminal, monitor GPU performance and memory utilization while running the scripts, please use NVIDIA-SMI: +``` +nvidia-smi +``` +# Contributors From fe1f6054081268e9272d552c75342aaf9a8de9e1 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Fri, 19 Sep 2025 12:09:07 -0700 Subject: [PATCH 06/54] Removing the older version of the file agent_llm.lf --- llm/src/agent_llm.lf | 44 -------------------------------------------- 1 file changed, 44 deletions(-) delete mode 100644 llm/src/agent_llm.lf diff --git a/llm/src/agent_llm.lf b/llm/src/agent_llm.lf deleted file mode 100644 index 5b5ab8e..0000000 --- a/llm/src/agent_llm.lf +++ /dev/null @@ -1,44 +0,0 @@ -target Python{ - files: llm.py -}; - -preamble{= - from llm import agent1, agent2 - -=} - -reactor LLM_a{ - - output user_in - reaction (startup)-> user_in{= - txt = input("Hey there!") - user_in.set(txt) - =} -} - -reactor LLM_b{ - input llm_a_in - output llm_b_out - reaction (llm_a_in)-> llm_b_out{= - llm_b_out.set(llm_a_in.value) - =} -} - -main reactor{ - state response - user_response = new LLM_a() - llm_response = new LLM_b() - // call llm a to respond to user - reaction (user_response.user_in)->llm_response.llm_a_in{= - - response = agent1(user_response.user_in.value) - llm_response.llm_a_in.set(response) - =} - - //llm b to respond to what llm a generated - reaction (llm_response.llm_b_out){= - # llm_response.llm_a_in = response - agent2(llm_response.llm_b_out.value) - =} - -} \ No newline at end of file From b0206643f229046eb82e3b59249a0ff493e3efef Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Mon, 22 Sep 2025 12:16:07 -0700 Subject: [PATCH 07/54] Modified comments to the program --- llm/src/llm_quiz_game.lf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/src/llm_quiz_game.lf b/llm/src/llm_quiz_game.lf index 85e89a4..cd9584a 100644 --- a/llm/src/llm_quiz_game.lf +++ b/llm/src/llm_quiz_game.lf @@ -4,7 +4,7 @@ target Python { keepalive: true, files: ["llm.py"] } preamble {= import threading import time - from llm import agent1, agent2 # your Python functions + from llm import agent1, agent2 def keyboard_prompt(reactor, action): while True: From cc0a08a5e504e562e4faeccbbfeef829627d25dd Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Tue, 23 Sep 2025 16:28:33 -0700 Subject: [PATCH 08/54] created the files for quiz game between two llm models using main reactor and also added a federated execution --- llm/src/llm_a.py | 77 +++++++ llm/src/llm_b.py | 78 +++++++ llm/src/llm_base_class.lf | 176 ++++++++++++++ llm/src/llm_base_class_federate.lf | 354 ++++++++++++++++++++++++++++ llm/src/llm_game_federated.lf | 40 ++++ llm/src/llm_quiz_game.lf | 359 +++++++++++++++-------------- 6 files changed, 907 insertions(+), 177 deletions(-) create mode 100644 llm/src/llm_a.py create mode 100644 llm/src/llm_b.py create mode 100644 llm/src/llm_base_class.lf create mode 100644 llm/src/llm_base_class_federate.lf create mode 100644 llm/src/llm_game_federated.lf diff --git a/llm/src/llm_a.py b/llm/src/llm_a.py new file mode 100644 index 0000000..df5faf3 --- /dev/null +++ b/llm/src/llm_a.py @@ -0,0 +1,77 @@ +# llm_a.py — Agent 1 (7B) + +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +# <<< put your token here >>> +hf_auth = "add token here " + +# Model to be chosen to act as an agent +model_id = "meta-llama/Llama-2-7b-chat-hf" + +# Require GPU (you said it must work only on GPU) +has_cuda = torch.cuda.is_available() 
+if not has_cuda: + raise RuntimeError("CUDA GPU required for this configuration.") +dtype = torch.bfloat16 if has_cuda else torch.float32 + +# 4-bit quantization +bnb_config = None +if has_cuda: + try: + import bitsandbytes as bnb # noqa: F401 + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=dtype, + ) + except Exception: + bnb_config = None + +# Tokenizer +tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) +if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + +# Shared kwargs +common = dict( + device_map="auto" if has_cuda else None, + dtype=dtype, + low_cpu_mem_usage=True, +) +if bnb_config is not None: + common["quantization_config"] = bnb_config + +# Model +model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_auth, **common) +model.eval() + +# Generation args +GEN_A = dict( + max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id +) + +# One-line postprocess +def postprocess(text: str) -> str: + t = text.strip() + for sep in ["\n", ". ", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + return t.strip().strip(":").strip() + +# Agent 1 entrypoint +def agent1(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer(prompt, return_tensors="pt") + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = model.generate(**inputs, **GEN_A) + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer.decode(out[0][prompt_len:], skip_special_tokens=True) + print(result) + return postprocess(result) \ No newline at end of file diff --git a/llm/src/llm_b.py b/llm/src/llm_b.py new file mode 100644 index 0000000..513d6c2 --- /dev/null +++ b/llm/src/llm_b.py @@ -0,0 +1,78 @@ + +# llm_b.py — Agent 2 (70B) + +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +# <<< put your token here >>> +hf_auth = "add token here" + +# Model to be chosen to act as an agent +model_id_2 = "meta-llama/Llama-2-70b-chat-hf" + +# Require GPU (GPU-only) +has_cuda = torch.cuda.is_available() +if not has_cuda: + raise RuntimeError("CUDA GPU required for this configuration.") +dtype = torch.bfloat16 if has_cuda else torch.float32 + +# 4-bit quantization +bnb_config = None +if has_cuda: + try: + import bitsandbytes as bnb # noqa: F401 + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=dtype, + ) + except Exception: + bnb_config = None + +# Tokenizer +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, token=hf_auth, use_fast=True) +if tokenizer_2.pad_token_id is None: + tokenizer_2.pad_token = tokenizer_2.eos_token + +# Shared kwargs +common = dict( + device_map="auto" if has_cuda else None, + dtype=dtype, + low_cpu_mem_usage=True, +) +if bnb_config is not None: + common["quantization_config"] = bnb_config + +# Model +model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, token=hf_auth, **common) +model_2.eval() + +# Generation args +GEN_B = dict( + max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer_2.eos_token_id, pad_token_id=tokenizer_2.pad_token_id +) + +# One-line postprocess +def postprocess(text: str) -> str: + t = text.strip() + for sep in ["\n", ". 
", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + return t.strip().strip(":").strip() + +# Agent 2 entrypoint +def agent2(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer_2(prompt, return_tensors="pt") + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = model_2.generate(**inputs, **GEN_B) + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) + print(result) + return postprocess(result) \ No newline at end of file diff --git a/llm/src/llm_base_class.lf b/llm/src/llm_base_class.lf new file mode 100644 index 0000000..d1eae4e --- /dev/null +++ b/llm/src/llm_base_class.lf @@ -0,0 +1,176 @@ +target Python + +### Reactor for handling user keyboard input +reactor KeyboardInput { + state th + state terminate = False + state eof = False + state buffer = "" + + physical action line + output prompt + output quit + + reaction(startup) -> line {= + def reader(): + while not self.terminate: + + s = input("Enter the quiz question\n") + if s == "": + self.eof = True + line.schedule(0) + break + elif s.lower().strip() == "quit": + self.eof = True + line.schedule(0) + break + else: + self.buffer = s + line.schedule(1) + self.th = threading.Thread(target=reader, daemon=True) + self.th.start() + =} + + reaction(line) -> prompt, quit {= + if self.eof: + quit.set() + environment().sync_shutdown() + else: + prompt.set(self.buffer) + =} + + reaction(shutdown) {= + self.terminate = True + if self.th and self.th.is_alive(): + self.th.join() + =} +} + + + +### Reactor for calling agent 1 +reactor LlmA { + state th + state running = False + state out_buffer = "" + + input user_in + physical action done + output answer + + + reaction(user_in) -> done {= + if self.running: + return + self.running = True + query = user_in.value + def agentA(): + try: + self.out_buffer = agent1(query) + finally: + done.schedule(1) + self.th = threading.Thread(target=agentA, daemon=True) + self.th.start() + =} + + reaction(done) -> answer {= + self.running = False + answer.set(self.out_buffer) + =} +} + + + +// ### Reactor for calling agent 2 +reactor LlmB { + state th + state running = False + state out_buffer = "" + input user_in + output answer + + physical action done + + reaction(user_in)->done{= + if self.running: + return + self.running = True + query = user_in.value + def agentB(): + try: + self.out_buffer = agent2(query) + finally: + done.schedule(1) + self.th = threading.Thread(target=agentB, daemon=True) + self.th.start() + =} + + reaction(done)->answer{= + self.running = False + answer.set(self.out_buffer) + =} + +} + + + +// ###Judge reactor to determine which agent responds first +reactor Judge{ + input query + input llma + input llmb + output ask + + state waiting = False + state logical_base_time = 0 + state physical_base_time = 0 + state winner = "" + + logical action timeout(60 sec) + + reaction(query) -> timeout, ask {= + self.waiting = True + self.winner = "" + self.logical_base_time = lf.time.logical_elapsed() + self.physical_base_time = lf.time.physical_elapsed() + timeout.schedule(0) + print(f"\n\n\nQuery: {query.value}\n") + print("waiting...\n") + ask.set(query.value) + =} + + reaction(llma) {= + if not self.waiting: + return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - 
self.physical_base_time) / 1000000) + print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms") + print(f"{llma.value}") + =} + + reaction(llmb) {= + if not self.waiting: + return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms") + print(f"{llmb.value}") + =} + + reaction(timeout) {= + if not self.waiting: + return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms") + =} +} diff --git a/llm/src/llm_base_class_federate.lf b/llm/src/llm_base_class_federate.lf new file mode 100644 index 0000000..c5638d8 --- /dev/null +++ b/llm/src/llm_base_class_federate.lf @@ -0,0 +1,354 @@ +target Python + +### Reactor for handling user keyboard input + +// reactor KeyboardInput { +// state th +// state terminate = False +// state eof = False +// state buffer = "" + +// physical action line +// output prompt +// output quit + +// reaction(startup) -> line {= +// import sys +// import threading +// import time + +// def reader(): +// while not self.terminate: +// s = input("Enter the quiz question\n") +// if s == "": +// self.eof = True +// try: line.schedule(0) +// except Exception as e: print("[keyboard] schedule EOF failed:", e, flush=True) +// break +// elif s.lower().strip() == "quit": +// self.eof = True +// try: line.schedule(0) +// except Exception as e: print("[keyboard] schedule quit failed:", e, flush=True) +// break +// else: +// self.buffer = s +// try: line.schedule(1) # small logical hop +// except Exception as e: +// print("[keyboard] schedule failed:", e, flush=True) +// break +// self.th = threading.Thread(target=reader, daemon=True) +// self.th.start() +// =} + +// reaction(line) -> prompt, quit {= +// if self.eof: +// quit.set() +// environment().sync_shutdown() +// else: +// prompt.set(self.buffer) +// =} + +// reaction(shutdown) {= +// self.terminate = True +// if self.th and self.th.is_alive(): +// self.th.join() +// =} +// } + +### Reactor for calling agent 1 +reactor LlmA { + state th + state running = False + state out_buffer = "" + state ready = False + + input user_in + physical action done + physical action notify_ready + output answer + output ready_out + + reaction(startup) {= + import os, sys, importlib.util, threading + def _load(): + try: + here = os.path.dirname(__file__) + if here not in sys.path: sys.path.insert(0, here) + from llm_a import agent1 + notify_ready.schedule(0) + except Exception as e: + print("[LlmA] Preload failed:", e, flush=True) + threading.Thread(target=_load, daemon=True).start() + =} + + reaction(notify_ready) -> ready_out {= + self.ready = True + ready_out.set(True) + =} + + reaction(user_in) -> done {= + import threading + if not self.ready: return + if self.running: return + self.running = True + q = user_in.value + from llm_a import agent1 + def agentA(): + try: + self.out_buffer = agent1(q) + finally: + try: done.schedule(5) + except Exception as e: print("[LlmA] schedule failed:", e, flush=True) + self.th = threading.Thread(target=agentA, daemon=True) + self.th.start() + =} + 
+ reaction(done) -> answer {= + self.running = False + answer.set(self.out_buffer) + =} +} + +// ### Reactor for calling agent 2 +reactor LlmB { + state th + state running = False + state out_buffer = "" + state ready = False + + input user_in + physical action done + physical action notify_ready + output answer + output ready_out + + reaction(startup) {= + import os, sys, importlib.util, threading + def _load(): + try: + here = os.path.dirname(__file__) + if here not in sys.path: sys.path.insert(0, here) + from llm_b import agent2 + notify_ready.schedule(0) + except Exception as e: + print("[LlmB] Preload failed:", e, flush=True) + threading.Thread(target=_load, daemon=True).start() + =} + + reaction(notify_ready) -> ready_out {= + self.ready = True + ready_out.set(True) + =} + + reaction(user_in) -> done {= + import threading + if not self.ready: return + if self.running: return + self.running = True + q = user_in.value + from llm_b import agent2 + def agentB(): + try: + self.out_buffer = agent2(q) + finally: + try: done.schedule(5) + except Exception as e: print("[LlmB] schedule failed:", e, flush=True) + self.th = threading.Thread(target=agentB, daemon=True) + self.th.start() + =} + + reaction(done) -> answer {= + self.running = False + answer.set(self.out_buffer) + =} +} +// ###Judge reactor to determine which agent responds first +// reactor Judge{ +// input query +// input llma +// input llmb +// output ask + +// state waiting = False +// state logical_base_time = 0 +// state physical_base_time = 0 +// state winner = "" + +// logical action timeout(60 sec) + +// reaction(query) -> timeout, ask {= +// self.waiting = True +// self.winner = "" +// self.logical_base_time = lf.time.logical_elapsed() +// self.physical_base_time = lf.time.physical_elapsed() +// timeout.schedule(0) +// print(f"\n\n\nQuery: {query.value}\n") +// print("waiting...\n") +// ask.set(query.value) +// =} + +// reaction(llma) {= +// if not self.waiting: +// return +// self.waiting = False +// logical_now = lf.time.logical_elapsed() +// physical_now = lf.time.physical_elapsed() +// logical_ms = int((logical_now - self.logical_base_time) / 1000000) +// physical_ms = int((physical_now - self.physical_base_time) / 1000000) +// print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms") +// print(f"{llma.value}") +// =} + +// reaction(llmb) {= +// if not self.waiting: +// return +// self.waiting = False +// logical_now = lf.time.logical_elapsed() +// physical_now = lf.time.physical_elapsed() +// logical_ms = int((logical_now - self.logical_base_time) / 1000000) +// physical_ms = int((physical_now - self.physical_base_time) / 1000000) +// print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms") +// print(f"{llmb.value}") +// =} + +// reaction(timeout) {= +// if not self.waiting: +// return +// self.waiting = False +// logical_now = lf.time.logical_elapsed() +// physical_now = lf.time.physical_elapsed() +// logical_ms = int((logical_now - self.logical_base_time) / 1000000) +// physical_ms = int((physical_now - self.physical_base_time) / 1000000) +// print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms") +// =} +// } + +reactor Judge { + state th + state reader_started = False + state terminate = False + state eof = False + state buffer = "" + state waiting = False + state logical_base_time = 0 + state physical_base_time = 0 + input ready_a + input ready_b + state a_ready = False + state b_ready = False + physical action line + physical action tick + logical 
action timeout(60 sec) + output ask + input llma + input llmb + output quit + + reaction(startup) {= + print("[Judge] Waiting for models to load...", flush=True) + =} + + reaction(ready_a) {= + self.a_ready = True + if self.a_ready and self.b_ready and not self.reader_started: + import sys, threading + def reader(): + while not self.terminate: + s = input("Enter the quiz question (or 'quit')\n") + if s == "" or s.lower().strip() == "quit": + self.eof = True + try: line.schedule(0) + except Exception as e: print("[Judge] schedule EOF failed:", e, flush=True) + break + else: + self.buffer = s + try: line.schedule(1) + except Exception as e: + print("[Judge] schedule line failed:", e, flush=True) + break + self.reader_started = True + print("[Judge] Models ready. You can ask questions now.", flush=True) + self.th = threading.Thread(target=reader, daemon=True) + self.th.start() + =} + + reaction(ready_b) {= + self.b_ready = True + if self.a_ready and self.b_ready and not self.reader_started: + import sys, threading + def reader(): + while not self.terminate: + s = input("Enter the quiz question (or 'quit')\n") + if s == "" or s.lower().strip() == "quit": + self.eof = True + try: line.schedule(0) + except Exception as e: print("[Judge] schedule EOF failed:", e, flush=True) + break + else: + self.buffer = s + try: line.schedule(1) + except Exception as e: + print("[Judge] schedule line failed:", e, flush=True) + break + self.reader_started = True + print("[Judge] Models ready. You can ask questions now.", flush=True) + self.th = threading.Thread(target=reader, daemon=True) + self.th.start() + =} + + reaction(line) -> tick, ask, timeout, quit {= + if self.eof: + quit.set() + environment().sync_shutdown() + else: + self.waiting = True + self.logical_base_time = lf.time.logical_elapsed() + self.physical_base_time = lf.time.physical_elapsed() + timeout.schedule(0) + print(f"\n\n\nQuery: {self.buffer}\n", flush=True) + print("waiting...\n", flush=True) + tick.schedule(5) + =} + + reaction(tick) -> ask {= + ask.set(self.buffer) + =} + + reaction(llma) {= + if not self.waiting: return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + print(f"{llma.value}", flush=True) + =} + + reaction(llmb) {= + if not self.waiting: return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + print(f"{llmb.value}", flush=True) + =} + + reaction(timeout) {= + if not self.waiting: return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + =} + + reaction(shutdown) {= + self.terminate = True + if self.th and self.th.is_alive(): + self.th.join() + =} +} \ No newline at end of file diff --git a/llm/src/llm_game_federated.lf b/llm/src/llm_game_federated.lf new 
file mode 100644 index 0000000..0a4dcd0 --- /dev/null +++ b/llm/src/llm_game_federated.lf @@ -0,0 +1,40 @@ +### llm.py file needs to be in the same directory +target Python { keepalive: true, files: ["llm_a.py", "llm_b.py"] } +// import KeyboardInput from "llm_base_class_federate.lf" +import LlmA from "llm_base_class_federate.lf" +import LlmB from "llm_base_class_federate.lf" +import Judge from "llm_base_class_federate.lf" + +preamble {= + import threading + import time + from llm_a import agent1 + from llm_b import agent2 +=} + + +federated reactor llm_game_federated at 10.218.100.95 { + // llma_response_f = new LlmA() + // llmb_response_f = new LlmB() + // keyboard_f = new KeyboardInput() + // j_f = new Judge() + + // keyboard_f.prompt -> j_f.query + // j_f.ask -> llma_response_f.user_in + // j_f.ask -> llmb_response_f.user_in + // llma_response_f.answer -> j_f.llma + // llmb_response_f.answer -> j_f.llmb + j = new Judge() + llma = new LlmA() + llmb = new LlmB() + + j.ask -> llma.user_in + j.ask -> llmb.user_in + llma.answer -> j.llma + llmb.answer -> j.llmb + + llma.ready_out -> j.ready_a + llmb.ready_out -> j.ready_b + +} + diff --git a/llm/src/llm_quiz_game.lf b/llm/src/llm_quiz_game.lf index cd9584a..7ba9d6b 100644 --- a/llm/src/llm_quiz_game.lf +++ b/llm/src/llm_quiz_game.lf @@ -1,188 +1,17 @@ ### llm.py file needs to be in the same directory target Python { keepalive: true, files: ["llm.py"] } +import KeyboardInput from "llm_base_class.lf" +import LlmA from "llm_base_class.lf" +import LlmB from "llm_base_class.lf" +import Judge from "llm_base_class.lf" + preamble {= import threading import time from llm import agent1, agent2 - - def keyboard_prompt(reactor, action): - while True: - time.sleep(5) - action.schedule(None) =} -### Reactor for handling user keyboard input -reactor KeyboardInput { - state th - state terminate = False - state eof = False - state buffer = "" - - physical action line - output prompt - output quit - - reaction(startup) -> line {= - def reader(): - while not self.terminate: - - s = input("Enter the quiz question\n") - if s == "": - self.eof = True - line.schedule(0) - break - elif s.lower().strip() == "quit": - self.eof = True - line.schedule(0) - break - else: - self.buffer = s - line.schedule(1) - self.th = threading.Thread(target=reader, daemon=True) - self.th.start() - =} - - reaction(line) -> prompt, quit {= - if self.eof: - quit.set() - environment().sync_shutdown() - else: - prompt.set(self.buffer) - =} - - reaction(shutdown) {= - self.terminate = True - if self.th and self.th.is_alive(): - self.th.join() - =} -} - -### Reactor for calling agent 1 -reactor LlmA { - state th - state running = False - state out_buffer = "" - - input user_in - physical action done - output answer - - - reaction(user_in) -> done {= - if self.running: - return - self.running = True - query = user_in.value - def agentA(): - try: - self.out_buffer = agent1(query) - finally: - done.schedule(1) - self.th = threading.Thread(target=agentA, daemon=True) - self.th.start() - =} - - reaction(done) -> answer {= - self.running = False - answer.set(self.out_buffer) - =} -} - - -### Reactor for calling agent 2 -reactor LlmB { - state th - state running = False - state out_buffer = "" - input user_in - output answer - - physical action done - - reaction(user_in)->done{= - if self.running: - return - self.running = True - query = user_in.value - def agentB(): - try: - self.out_buffer = agent2(query) - finally: - done.schedule(1) - self.th = threading.Thread(target=agentB, daemon=True) - 
self.th.start() - =} - - reaction(done)->answer{= - self.running = False - answer.set(self.out_buffer) - =} - -} - -###Judge reactor to determine which agent responds first -reactor Judge{ - input query - input llma - input llmb - output ask - - state waiting = False - state logical_base_time = 0 - state physical_base_time = 0 - state winner = "" - - logical action timeout(60 sec) - - reaction(query) -> timeout, ask {= - self.waiting = True - self.winner = "" - self.logical_base_time = lf.time.logical_elapsed() - self.physical_base_time = lf.time.physical_elapsed() - timeout.schedule(0) - print(f"\n\n\nQuery: {query.value}\n") - print("waiting...\n") - ask.set(query.value) - =} - - reaction(llma) {= - if not self.waiting: - return - self.waiting = False - logical_now = lf.time.logical_elapsed() - physical_now = lf.time.physical_elapsed() - logical_ms = int((logical_now - self.logical_base_time) / 1000000) - physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms") - print(f"{llma.value}") - =} - - reaction(llmb) {= - if not self.waiting: - return - self.waiting = False - logical_now = lf.time.logical_elapsed() - physical_now = lf.time.physical_elapsed() - logical_ms = int((logical_now - self.logical_base_time) / 1000000) - physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms") - print(f"{llmb.value}") - =} - - reaction(timeout) {= - if not self.waiting: - return - self.waiting = False - logical_now = lf.time.logical_elapsed() - physical_now = lf.time.physical_elapsed() - logical_ms = int((logical_now - self.logical_base_time) / 1000000) - physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms") - =} -} - - main reactor { llma_response = new LlmA() llmb_response = new LlmB() @@ -194,4 +23,180 @@ main reactor { j.ask -> llmb_response.user_in llma_response.answer -> j.llma llmb_response.answer -> j.llmb -} \ No newline at end of file +} + + +// def keyboard_prompt(reactor, action): + // while True: + // time.sleep(5) + // action.schedule(None) + +// ### Reactor for handling user keyboard input +// reactor KeyboardInput { +// state th +// state terminate = False +// state eof = False +// state buffer = "" + +// physical action line +// output prompt +// output quit + +// reaction(startup) -> line {= +// def reader(): +// while not self.terminate: + +// s = input("Enter the quiz question\n") +// if s == "": +// self.eof = True +// line.schedule(0) +// break +// elif s.lower().strip() == "quit": +// self.eof = True +// line.schedule(0) +// break +// else: +// self.buffer = s +// line.schedule(1) +// self.th = threading.Thread(target=reader, daemon=True) +// self.th.start() +// =} + +// reaction(line) -> prompt, quit {= +// if self.eof: +// quit.set() +// environment().sync_shutdown() +// else: +// prompt.set(self.buffer) +// =} + +// reaction(shutdown) {= +// self.terminate = True +// if self.th and self.th.is_alive(): +// self.th.join() +// =} +// } + +// ### Reactor for calling agent 1 +// reactor LlmA { +// state th +// state running = False +// state out_buffer = "" + +// input user_in +// physical action done +// output answer + + +// reaction(user_in) -> done {= +// if self.running: +// return +// self.running = True +// query = user_in.value +// def agentA(): +// try: +// self.out_buffer = agent1(query) +// 
finally: +// done.schedule(1) +// self.th = threading.Thread(target=agentA, daemon=True) +// self.th.start() +// =} + +// reaction(done) -> answer {= +// self.running = False +// answer.set(self.out_buffer) +// =} +// } + + +// ### Reactor for calling agent 2 +// reactor LlmB { +// state th +// state running = False +// state out_buffer = "" +// input user_in +// output answer + +// physical action done + +// reaction(user_in)->done{= +// if self.running: +// return +// self.running = True +// query = user_in.value +// def agentB(): +// try: +// self.out_buffer = agent2(query) +// finally: +// done.schedule(1) +// self.th = threading.Thread(target=agentB, daemon=True) +// self.th.start() +// =} + +// reaction(done)->answer{= +// self.running = False +// answer.set(self.out_buffer) +// =} + +// } + +// ###Judge reactor to determine which agent responds first +// reactor Judge{ +// input query +// input llma +// input llmb +// output ask + +// state waiting = False +// state logical_base_time = 0 +// state physical_base_time = 0 +// state winner = "" + +// logical action timeout(60 sec) + +// reaction(query) -> timeout, ask {= +// self.waiting = True +// self.winner = "" +// self.logical_base_time = lf.time.logical_elapsed() +// self.physical_base_time = lf.time.physical_elapsed() +// timeout.schedule(0) +// print(f"\n\n\nQuery: {query.value}\n") +// print("waiting...\n") +// ask.set(query.value) +// =} + +// reaction(llma) {= +// if not self.waiting: +// return +// self.waiting = False +// logical_now = lf.time.logical_elapsed() +// physical_now = lf.time.physical_elapsed() +// logical_ms = int((logical_now - self.logical_base_time) / 1000000) +// physical_ms = int((physical_now - self.physical_base_time) / 1000000) +// print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms") +// print(f"{llma.value}") +// =} + +// reaction(llmb) {= +// if not self.waiting: +// return +// self.waiting = False +// logical_now = lf.time.logical_elapsed() +// physical_now = lf.time.physical_elapsed() +// logical_ms = int((logical_now - self.logical_base_time) / 1000000) +// physical_ms = int((physical_now - self.physical_base_time) / 1000000) +// print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms") +// print(f"{llmb.value}") +// =} + +// reaction(timeout) {= +// if not self.waiting: +// return +// self.waiting = False +// logical_now = lf.time.logical_elapsed() +// physical_now = lf.time.physical_elapsed() +// logical_ms = int((logical_now - self.logical_base_time) / 1000000) +// physical_ms = int((physical_now - self.physical_base_time) / 1000000) +// print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms") +// =} +// } From 632dc8eda58f7fd244305353d6898896277733a6 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Tue, 23 Sep 2025 16:37:59 -0700 Subject: [PATCH 09/54] Adding the git ignore file --- .gitignore | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..eed972c --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +llm/fed-gen/ +llm/src-gen/ +llm/include/ +llm/bin +**__pycache__** +llm/=** \ No newline at end of file From 6c8117de13058b11e88771b69dc9bee887b8a335 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 25 Sep 2025 11:58:32 -0700 Subject: [PATCH 10/54] Fixed the issue for the judge federate to receive the signal that model is loaded --- llm/src/llm_base_class_federate.lf | 140 ++++------------------------- 
llm/src/llm_game_federated.lf | 7 +- 2 files changed, 19 insertions(+), 128 deletions(-) diff --git a/llm/src/llm_base_class_federate.lf b/llm/src/llm_base_class_federate.lf index c5638d8..235b703 100644 --- a/llm/src/llm_base_class_federate.lf +++ b/llm/src/llm_base_class_federate.lf @@ -1,60 +1,5 @@ target Python -### Reactor for handling user keyboard input - -// reactor KeyboardInput { -// state th -// state terminate = False -// state eof = False -// state buffer = "" - -// physical action line -// output prompt -// output quit - -// reaction(startup) -> line {= -// import sys -// import threading -// import time - -// def reader(): -// while not self.terminate: -// s = input("Enter the quiz question\n") -// if s == "": -// self.eof = True -// try: line.schedule(0) -// except Exception as e: print("[keyboard] schedule EOF failed:", e, flush=True) -// break -// elif s.lower().strip() == "quit": -// self.eof = True -// try: line.schedule(0) -// except Exception as e: print("[keyboard] schedule quit failed:", e, flush=True) -// break -// else: -// self.buffer = s -// try: line.schedule(1) # small logical hop -// except Exception as e: -// print("[keyboard] schedule failed:", e, flush=True) -// break -// self.th = threading.Thread(target=reader, daemon=True) -// self.th.start() -// =} - -// reaction(line) -> prompt, quit {= -// if self.eof: -// quit.set() -// environment().sync_shutdown() -// else: -// prompt.set(self.buffer) -// =} - -// reaction(shutdown) {= -// self.terminate = True -// if self.th and self.th.is_alive(): -// self.th.join() -// =} -// } - ### Reactor for calling agent 1 reactor LlmA { state th @@ -64,20 +9,22 @@ reactor LlmA { input user_in physical action done - physical action notify_ready + physical action notify_ready output answer output ready_out - reaction(startup) {= - import os, sys, importlib.util, threading + reaction(startup) -> notify_ready {= + import os, sys, importlib.util, threading, traceback + act = notify_ready def _load(): try: here = os.path.dirname(__file__) if here not in sys.path: sys.path.insert(0, here) from llm_a import agent1 - notify_ready.schedule(0) + act.schedule(1) except Exception as e: print("[LlmA] Preload failed:", e, flush=True) + traceback.print_exc() threading.Thread(target=_load, daemon=True).start() =} @@ -118,20 +65,22 @@ reactor LlmB { input user_in physical action done - physical action notify_ready + physical action notify_ready output answer output ready_out - reaction(startup) {= - import os, sys, importlib.util, threading + reaction(startup) -> notify_ready {= + import os, sys, importlib.util, threading, traceback + act = notify_ready def _load(): try: here = os.path.dirname(__file__) if here not in sys.path: sys.path.insert(0, here) from llm_b import agent2 - notify_ready.schedule(0) + act.schedule(1) except Exception as e: print("[LlmB] Preload failed:", e, flush=True) + traceback.print_exc() threading.Thread(target=_load, daemon=True).start() =} @@ -163,65 +112,6 @@ reactor LlmB { =} } // ###Judge reactor to determine which agent responds first -// reactor Judge{ -// input query -// input llma -// input llmb -// output ask - -// state waiting = False -// state logical_base_time = 0 -// state physical_base_time = 0 -// state winner = "" - -// logical action timeout(60 sec) - -// reaction(query) -> timeout, ask {= -// self.waiting = True -// self.winner = "" -// self.logical_base_time = lf.time.logical_elapsed() -// self.physical_base_time = lf.time.physical_elapsed() -// timeout.schedule(0) -// print(f"\n\n\nQuery: 
{query.value}\n") -// print("waiting...\n") -// ask.set(query.value) -// =} - -// reaction(llma) {= -// if not self.waiting: -// return -// self.waiting = False -// logical_now = lf.time.logical_elapsed() -// physical_now = lf.time.physical_elapsed() -// logical_ms = int((logical_now - self.logical_base_time) / 1000000) -// physical_ms = int((physical_now - self.physical_base_time) / 1000000) -// print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms") -// print(f"{llma.value}") -// =} - -// reaction(llmb) {= -// if not self.waiting: -// return -// self.waiting = False -// logical_now = lf.time.logical_elapsed() -// physical_now = lf.time.physical_elapsed() -// logical_ms = int((logical_now - self.logical_base_time) / 1000000) -// physical_ms = int((physical_now - self.physical_base_time) / 1000000) -// print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms") -// print(f"{llmb.value}") -// =} - -// reaction(timeout) {= -// if not self.waiting: -// return -// self.waiting = False -// logical_now = lf.time.logical_elapsed() -// physical_now = lf.time.physical_elapsed() -// logical_ms = int((logical_now - self.logical_base_time) / 1000000) -// physical_ms = int((physical_now - self.physical_base_time) / 1000000) -// print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms") -// =} -// } reactor Judge { state th @@ -245,10 +135,10 @@ reactor Judge { output quit reaction(startup) {= - print("[Judge] Waiting for models to load...", flush=True) + print("[Judge] Waiting for models to load", flush=True) =} - reaction(ready_a) {= + reaction(ready_a)->line {= self.a_ready = True if self.a_ready and self.b_ready and not self.reader_started: import sys, threading @@ -272,7 +162,7 @@ reactor Judge { self.th.start() =} - reaction(ready_b) {= + reaction(ready_b)->line {= self.b_ready = True if self.a_ready and self.b_ready and not self.reader_started: import sys, threading diff --git a/llm/src/llm_game_federated.lf b/llm/src/llm_game_federated.lf index 0a4dcd0..5111854 100644 --- a/llm/src/llm_game_federated.lf +++ b/llm/src/llm_game_federated.lf @@ -1,9 +1,10 @@ ### llm.py file needs to be in the same directory target Python { keepalive: true, files: ["llm_a.py", "llm_b.py"] } // import KeyboardInput from "llm_base_class_federate.lf" -import LlmA from "llm_base_class_federate.lf" -import LlmB from "llm_base_class_federate.lf" -import Judge from "llm_base_class_federate.lf" +// import LlmA from "llm_base_class_federate.lf" +// import LlmB from "llm_base_class_federate.lf" +// import Judge from "llm_base_class_federate.lf" +import LlmA, LlmB, Judge from "llm_base_class_federate.lf" preamble {= import threading From 2f1a884b43f59d6cc7af7819171a3a90b4452856 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 25 Sep 2025 16:09:09 -0700 Subject: [PATCH 11/54] Added the version of files for running on different devices --- llm/src/llm_a.py | 10 +-- llm/src/llm_b.py | 10 +-- llm/src/llm_b_m2.py | 102 +++++++++++++++++++++++++++++ llm/src/llm_base_class_federate.lf | 8 +-- llm/src/llm_game_federated.lf | 26 ++------ 5 files changed, 123 insertions(+), 33 deletions(-) create mode 100644 llm/src/llm_b_m2.py diff --git a/llm/src/llm_a.py b/llm/src/llm_a.py index df5faf3..15411cd 100644 --- a/llm/src/llm_a.py +++ b/llm/src/llm_a.py @@ -1,4 +1,4 @@ -# llm_a.py — Agent 1 (7B) +# llm_a.py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig @@ -6,10 +6,10 @@ # <<< put your token here >>> hf_auth = "add token here 
" -# Model to be chosen to act as an agent +# Model model_id = "meta-llama/Llama-2-7b-chat-hf" -# Require GPU (you said it must work only on GPU) +# Require GPU has_cuda = torch.cuda.is_available() if not has_cuda: raise RuntimeError("CUDA GPU required for this configuration.") @@ -19,7 +19,7 @@ bnb_config = None if has_cuda: try: - import bitsandbytes as bnb # noqa: F401 + import bitsandbytes as bnb bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", @@ -63,7 +63,7 @@ def postprocess(text: str) -> str: break return t.strip().strip(":").strip() -# Agent 1 entrypoint +# Agent 1 def agent1(q: str) -> str: prompt = f"You are a concise Q&A assistant.\n\n{q}\n" inputs = tokenizer(prompt, return_tensors="pt") diff --git a/llm/src/llm_b.py b/llm/src/llm_b.py index 513d6c2..6acb7d9 100644 --- a/llm/src/llm_b.py +++ b/llm/src/llm_b.py @@ -1,5 +1,5 @@ -# llm_b.py — Agent 2 (70B) +# llm_b.py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig @@ -7,10 +7,10 @@ # <<< put your token here >>> hf_auth = "add token here" -# Model to be chosen to act as an agent +# Model model_id_2 = "meta-llama/Llama-2-70b-chat-hf" -# Require GPU (GPU-only) +# Require GPU has_cuda = torch.cuda.is_available() if not has_cuda: raise RuntimeError("CUDA GPU required for this configuration.") @@ -20,7 +20,7 @@ bnb_config = None if has_cuda: try: - import bitsandbytes as bnb # noqa: F401 + import bitsandbytes as bnb bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", @@ -64,7 +64,7 @@ def postprocess(text: str) -> str: break return t.strip().strip(":").strip() -# Agent 2 entrypoint +# Agent 2 def agent2(q: str) -> str: prompt = f"You are a concise Q&A assistant.\n\n{q}\n" inputs = tokenizer_2(prompt, return_tensors="pt") diff --git a/llm/src/llm_b_m2.py b/llm/src/llm_b_m2.py new file mode 100644 index 0000000..45bad45 --- /dev/null +++ b/llm/src/llm_b_m2.py @@ -0,0 +1,102 @@ +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +hf_auth = "add your token here" + +model_id_2 = "google/gemma-3-270m" + +has_cuda = torch.cuda.is_available() +has_mps = torch.backends.mps.is_available() + +if has_cuda: + device = torch.device("cuda") + compute_dtype = torch.float16 +elif has_mps: + device = torch.device("mps") + compute_dtype = torch.float32 +else: + device = torch.device("cpu") + compute_dtype = torch.float32 + + +common = dict( + low_cpu_mem_usage=True, + attn_implementation="eager", +) + +#4-bit on CUDA if the device has it +if has_cuda: + try: + import bitsandbytes as bnb + common["quantization_config"] = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=compute_dtype, + ) + common["device_map"] = "auto" + except Exception: + print("[WARN] bitsandbytes not available; using full-precision fp16 on CUDA.", flush=True) + common["device_map"] = "auto" +else: + common["device_map"] = None + +# Tokenizer +tok_kwargs = dict(use_fast=True) +if hf_auth: + tok_kwargs["token"] = hf_auth +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, **tok_kwargs) +if tokenizer_2.pad_token_id is None: + tokenizer_2.pad_token = tokenizer_2.eos_token + +# Model +mp_kwargs = dict(dtype=compute_dtype, **common) +if hf_auth: + mp_kwargs["token"] = hf_auth + +model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, **mp_kwargs) +if not has_cuda: + model_2.to(device) +model_2.eval() + +# Greedy decoding +GEN_B = dict( + max_new_tokens=32, 
+ do_sample=True, + eos_token_id=tokenizer_2.eos_token_id, + pad_token_id=tokenizer_2.pad_token_id, +) + +def postprocess(text: str) -> str: + t = text.strip() + for sep in ["\n", ". ", " "]: + i = t.find(sep) + if i > 0: + t = t[:i] + break + return t.strip().strip(":").strip() + +def agent2(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer_2(prompt, return_tensors="pt") + + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + elif has_mps: + inputs = {k: v.to("mps") for k, v in inputs.items()} + else: + inputs = {k: v.to("cpu") for k, v in inputs.items()} + + with torch.inference_mode(): + out = model_2.generate(**inputs, **GEN_B) + + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) + print(result) + return postprocess(result) + +# def main(): +# agent2("what is AI?") + +# if __name__ == "__main__": +# main() \ No newline at end of file diff --git a/llm/src/llm_base_class_federate.lf b/llm/src/llm_base_class_federate.lf index 235b703..194d49a 100644 --- a/llm/src/llm_base_class_federate.lf +++ b/llm/src/llm_base_class_federate.lf @@ -56,7 +56,7 @@ reactor LlmA { =} } -// ### Reactor for calling agent 2 +### Reactor for calling agent 2 reactor LlmB { state th state running = False @@ -76,7 +76,7 @@ reactor LlmB { try: here = os.path.dirname(__file__) if here not in sys.path: sys.path.insert(0, here) - from llm_b import agent2 + from llm_b_m2 import agent2 act.schedule(1) except Exception as e: print("[LlmB] Preload failed:", e, flush=True) @@ -95,7 +95,7 @@ reactor LlmB { if self.running: return self.running = True q = user_in.value - from llm_b import agent2 + from llm_b_m2 import agent2 def agentB(): try: self.out_buffer = agent2(q) @@ -111,7 +111,7 @@ reactor LlmB { answer.set(self.out_buffer) =} } -// ###Judge reactor to determine which agent responds first +###Judge reactor to determine which agent responds first reactor Judge { state th diff --git a/llm/src/llm_game_federated.lf b/llm/src/llm_game_federated.lf index 5111854..d2b0834 100644 --- a/llm/src/llm_game_federated.lf +++ b/llm/src/llm_game_federated.lf @@ -1,33 +1,21 @@ ### llm.py file needs to be in the same directory -target Python { keepalive: true, files: ["llm_a.py", "llm_b.py"] } -// import KeyboardInput from "llm_base_class_federate.lf" -// import LlmA from "llm_base_class_federate.lf" -// import LlmB from "llm_base_class_federate.lf" -// import Judge from "llm_base_class_federate.lf" +target Python { keepalive: true, files: ["llm_a.py", "llm_b_m2.py" ] } #"llm_b.py" + import LlmA, LlmB, Judge from "llm_base_class_federate.lf" preamble {= import threading import time from llm_a import agent1 - from llm_b import agent2 + from llm_b_m2 import agent2 =} federated reactor llm_game_federated at 10.218.100.95 { - // llma_response_f = new LlmA() - // llmb_response_f = new LlmB() - // keyboard_f = new KeyboardInput() - // j_f = new Judge() - - // keyboard_f.prompt -> j_f.query - // j_f.ask -> llma_response_f.user_in - // j_f.ask -> llmb_response_f.user_in - // llma_response_f.answer -> j_f.llma - // llmb_response_f.answer -> j_f.llmb - j = new Judge() - llma = new LlmA() - llmb = new LlmB() + + j = new Judge() + llma = new LlmA() + llmb = new LlmB() j.ask -> llma.user_in j.ask -> llmb.user_in From 1958fbb8aceeff3634d074a8ba8d3452e6e4337d Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 9 Oct 2025 11:01:23 -0700 Subject: [PATCH 12/54] Adding a python script for llama 3.2 1B for jetson orin 
--- llm/src/llm_b_jetson.py | 52 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 llm/src/llm_b_jetson.py diff --git a/llm/src/llm_b_jetson.py b/llm/src/llm_b_jetson.py new file mode 100644 index 0000000..7dc94fa --- /dev/null +++ b/llm/src/llm_b_jetson.py @@ -0,0 +1,52 @@ +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + +hf_auth = "" + +model_id = "meta-llama/Llama-3.2-1B" + +has_cuda = torch.cuda.is_available() +device = torch.device("cuda" if has_cuda else "cpu") +compute_dtype = torch.float16 if has_cuda else torch.float32 + +common = dict( + low_cpu_mem_usage=True, + attn_implementation="eager", +) + +tok_kwargs = dict(use_fast=True) +if hf_auth: + tok_kwargs["token"] = hf_auth + +tokenizer = AutoTokenizer.from_pretrained(model_id, **tok_kwargs) +if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + +mp_kwargs = dict(torch_dtype=compute_dtype, **common) +if hf_auth: + mp_kwargs["token"] = hf_auth + +model = AutoModelForCausalLM.from_pretrained(model_id, **mp_kwargs) +model.to(device) +model.eval() + +GEN = dict( + max_new_tokens=64, + do_sample=True, + temperature=0.7, + top_p=0.95, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id, +) + +def agent2(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer(prompt, return_tensors="pt").to(device) + with torch.inference_mode(): + out = model.generate(**inputs, **GEN) + gen = out[0, inputs["input_ids"].shape[1]:] + return tokenizer.decode(gen, skip_special_tokens=True).strip() + +if __name__ == "__main__": + question = "What is the capital of Japan?" + print(agent2(question)) From 60f642d11f09fc6f49a9c3fce21401f743f07674 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 9 Oct 2025 12:27:06 -0700 Subject: [PATCH 13/54] commented the code for testing --- llm/src/llm_b_jetson.py | 87 ++++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 41 deletions(-) diff --git a/llm/src/llm_b_jetson.py b/llm/src/llm_b_jetson.py index 7dc94fa..40461ed 100644 --- a/llm/src/llm_b_jetson.py +++ b/llm/src/llm_b_jetson.py @@ -1,52 +1,57 @@ -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer +# import torch +# from transformers import AutoModelForCausalLM, AutoTokenizer -hf_auth = "" +# hf_auth = "" -model_id = "meta-llama/Llama-3.2-1B" +# model_id = "meta-llama/Llama-3.2-1B" -has_cuda = torch.cuda.is_available() -device = torch.device("cuda" if has_cuda else "cpu") -compute_dtype = torch.float16 if has_cuda else torch.float32 +# has_cuda = torch.cuda.is_available() +# device = torch.device("cuda" if has_cuda else "cpu") +# compute_dtype = torch.float16 if has_cuda else torch.float32 -common = dict( - low_cpu_mem_usage=True, - attn_implementation="eager", -) +# common = dict( +# low_cpu_mem_usage=True, +# attn_implementation="eager", +# ) -tok_kwargs = dict(use_fast=True) -if hf_auth: - tok_kwargs["token"] = hf_auth +# tok_kwargs = dict(use_fast=True) +# if hf_auth: +# tok_kwargs["token"] = hf_auth -tokenizer = AutoTokenizer.from_pretrained(model_id, **tok_kwargs) -if tokenizer.pad_token_id is None: - tokenizer.pad_token = tokenizer.eos_token +# tokenizer = AutoTokenizer.from_pretrained(model_id, **tok_kwargs) +# if tokenizer.pad_token_id is None: +# tokenizer.pad_token = tokenizer.eos_token -mp_kwargs = dict(torch_dtype=compute_dtype, **common) -if hf_auth: - mp_kwargs["token"] = hf_auth +# mp_kwargs = dict(torch_dtype=compute_dtype, **common) +# if 
hf_auth: +# mp_kwargs["token"] = hf_auth -model = AutoModelForCausalLM.from_pretrained(model_id, **mp_kwargs) -model.to(device) -model.eval() +# model = AutoModelForCausalLM.from_pretrained(model_id, **mp_kwargs) +# model.to(device) +# model.eval() + +# GEN = dict( +# max_new_tokens=64, +# do_sample=True, +# temperature=0.7, +# top_p=0.95, +# eos_token_id=tokenizer.eos_token_id, +# pad_token_id=tokenizer.pad_token_id, +# ) + +# def agent2(q: str) -> str: +# prompt = f"You are a concise Q&A assistant.\n\n{q}\n" +# inputs = tokenizer(prompt, return_tensors="pt").to(device) +# with torch.inference_mode(): +# out = model.generate(**inputs, **GEN) +# gen = out[0, inputs["input_ids"].shape[1]:] +# return tokenizer.decode(gen, skip_special_tokens=True).strip() + +# if __name__ == "__main__": +# question = "What is the capital of Japan?" +# print(agent2(question)) -GEN = dict( - max_new_tokens=64, - do_sample=True, - temperature=0.7, - top_p=0.95, - eos_token_id=tokenizer.eos_token_id, - pad_token_id=tokenizer.pad_token_id, -) def agent2(q: str) -> str: - prompt = f"You are a concise Q&A assistant.\n\n{q}\n" - inputs = tokenizer(prompt, return_tensors="pt").to(device) - with torch.inference_mode(): - out = model.generate(**inputs, **GEN) - gen = out[0, inputs["input_ids"].shape[1]:] - return tokenizer.decode(gen, skip_special_tokens=True).strip() - -if __name__ == "__main__": - question = "What is the capital of Japan?" - print(agent2(question)) + + return "Hello this is jetson" \ No newline at end of file From 6a26cab3fc73cff14795fee19cdd107777949023 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 9 Oct 2025 12:29:42 -0700 Subject: [PATCH 14/54] Testing Jetson --- llm/src/llm_game_federated.lf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/src/llm_game_federated.lf b/llm/src/llm_game_federated.lf index d2b0834..863abca 100644 --- a/llm/src/llm_game_federated.lf +++ b/llm/src/llm_game_federated.lf @@ -1,5 +1,5 @@ ### llm.py file needs to be in the same directory -target Python { keepalive: true, files: ["llm_a.py", "llm_b_m2.py" ] } #"llm_b.py" +target Python { keepalive: true, files: ["llm_a.py", "llm_b_jetson.py" ] } #"llm_b.py" import LlmA, LlmB, Judge from "llm_base_class_federate.lf" From aef0ac957c855200119bbab82a2e7c8dce735c59 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 9 Oct 2025 12:43:07 -0700 Subject: [PATCH 15/54] Changed the file names in base class --- llm/src/llm_base_class_federate.lf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llm/src/llm_base_class_federate.lf b/llm/src/llm_base_class_federate.lf index 194d49a..14412a2 100644 --- a/llm/src/llm_base_class_federate.lf +++ b/llm/src/llm_base_class_federate.lf @@ -76,7 +76,7 @@ reactor LlmB { try: here = os.path.dirname(__file__) if here not in sys.path: sys.path.insert(0, here) - from llm_b_m2 import agent2 + from llm_b_jetson import agent2 act.schedule(1) except Exception as e: print("[LlmB] Preload failed:", e, flush=True) @@ -95,7 +95,7 @@ reactor LlmB { if self.running: return self.running = True q = user_in.value - from llm_b_m2 import agent2 + from llm_b_jetson import agent2 def agentB(): try: self.out_buffer = agent2(q) From c4c635372ea379070969773de15f8d95a1ac3e16 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 9 Oct 2025 12:49:56 -0700 Subject: [PATCH 16/54] Changed the RTI to jetson --- llm/src/llm_game_federated.lf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/src/llm_game_federated.lf b/llm/src/llm_game_federated.lf index 
863abca..3a9f677 100644 --- a/llm/src/llm_game_federated.lf +++ b/llm/src/llm_game_federated.lf @@ -11,7 +11,7 @@ preamble {= =} -federated reactor llm_game_federated at 10.218.100.95 { +federated reactor llm_game_federated at 10.155.214.175 { j = new Judge() llma = new LlmA() From 9d503d53028c700ac9a00f1ec33b851a424fb407 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 9 Oct 2025 14:12:41 -0700 Subject: [PATCH 17/54] corrected the ip for jetson orin --- llm/src/llm_game_federated.lf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/src/llm_game_federated.lf b/llm/src/llm_game_federated.lf index 3a9f677..d2b745c 100644 --- a/llm/src/llm_game_federated.lf +++ b/llm/src/llm_game_federated.lf @@ -11,7 +11,7 @@ preamble {= =} -federated reactor llm_game_federated at 10.155.214.175 { +federated reactor llm_game_federated at 10.155.241.175 { j = new Judge() llma = new LlmA() From 9a1730b5a0f85e6c76696e7e0dd0b3a1b20f6ec1 Mon Sep 17 00:00:00 2001 From: Hokeun Kim Date: Tue, 14 Oct 2025 09:10:01 -0700 Subject: [PATCH 18/54] Add requirements.txt --- llm/src/requirements.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 llm/src/requirements.txt diff --git a/llm/src/requirements.txt b/llm/src/requirements.txt new file mode 100644 index 0000000..c126cad --- /dev/null +++ b/llm/src/requirements.txt @@ -0,0 +1,7 @@ +accelerate +transformers +tokenizers +bitsandbytes>=0.43.0 +torch +torchvision + From ea20703665e1569f0aca8bbcf662f23825b50714 Mon Sep 17 00:00:00 2001 From: Hokeun Kim Date: Tue, 14 Oct 2025 09:11:35 -0700 Subject: [PATCH 19/54] Move requirements.txt to top dir --- llm/{src => }/requirements.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename llm/{src => }/requirements.txt (100%) diff --git a/llm/src/requirements.txt b/llm/requirements.txt similarity index 100% rename from llm/src/requirements.txt rename to llm/requirements.txt From e16438a1f8208193a92caa1f1024d22fe941e1d3 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Tue, 14 Oct 2025 19:51:01 -0700 Subject: [PATCH 20/54] Adding the organized folders and README.md --- llm/README.md | 23 +- llm/src/federated_execution/README.md | 107 ++++++++ .../llm_base_class_federate.lf | 244 ++++++++++++++++++ .../federated_execution/llm_game_federated.lf | 29 +++ llm/src/python_files/llm.py | 92 +++++++ llm/src/python_files/llm_a.py | 77 ++++++ llm/src/python_files/llm_b.py | 78 ++++++ llm/src/python_files/llm_b_jetson.py | 57 ++++ llm/src/python_files/llm_b_m2.py | 102 ++++++++ 9 files changed, 790 insertions(+), 19 deletions(-) create mode 100644 llm/src/federated_execution/README.md create mode 100644 llm/src/federated_execution/llm_base_class_federate.lf create mode 100644 llm/src/federated_execution/llm_game_federated.lf create mode 100644 llm/src/python_files/llm.py create mode 100644 llm/src/python_files/llm_a.py create mode 100644 llm/src/python_files/llm_b.py create mode 100644 llm/src/python_files/llm_b_jetson.py create mode 100644 llm/src/python_files/llm_b_m2.py diff --git a/llm/README.md b/llm/README.md index c7b4000..f7a0214 100644 --- a/llm/README.md +++ b/llm/README.md @@ -8,19 +8,9 @@ This is a quiz-style game between two LLM agents. For each user question typed a You need Python installed, as llm.py is written in Python. ## Library Dependencies -To run this project, the following dependencies are required. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. 
So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. +To run this project, there are dependencies required which are in lf-demos/llm/requirements.txt file. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. While newer versions of other dependencies may work, the specific versions listed below have been tested and are recommended for optimal performance. - -It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies. The available options for environment setup are listed below. - -``` -pip install accelerate -pip install transformers -pip install tokenizers -pip install bitsandbytes>=0.43.0 -pip install torch -pip install torchvision -``` +It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies. ## System Requirements @@ -42,8 +32,8 @@ To ensure optimal performance, the following hardware and software requirements Make sure the environment is properly configured to use CUDA for optimal GPU acceleration. # Files and directories in this repository - - **`llm.py`** - Contains the logic to load and call LLM models from the Hugging Face pretrained hub. - - **`llm_quiz_game.lf`** - Lingua Franca program that defines the quiz game reactors (Keyboard input, LLM agents, and Judge). + - **`llm_base_class.lf`** - Contains the base reactors LlmA, LlmB, Keyboard and Judge.. + - **`llm_quiz_game.lf`** - Lingua Franca program that defines the quiz game reactors (Keyboard input, LLM agent A, LLM agent B and Judge). # Execution Workflow @@ -87,9 +77,4 @@ Answer: Seoul. -### Step 3: Monitoring GPU Performance (Optional) -In another terminal, monitor GPU performance and memory utilization while running the scripts, please use NVIDIA-SMI: -``` -nvidia-smi -``` # Contributors diff --git a/llm/src/federated_execution/README.md b/llm/src/federated_execution/README.md new file mode 100644 index 0000000..7ce4fff --- /dev/null +++ b/llm/src/federated_execution/README.md @@ -0,0 +1,107 @@ +# LLM Demo (Federated Execution) + +# Overview +This is a quiz-style game between two LLM agents using federated execution. For each user question asked to the Judge, both agents answer in parallel. The Judge announces whichever answer arrives first (or a timeout if neither responds within 60 sec), and prints per-question elapsed logical and physical times. There are three federates (federate__llma, federate__llmb, federate__j) and an RTI. + +# Pre-requisites + +You need Python installed, as llm_a.py, llm_b.py, llm_b_m2.py and llm_b_jetson.py are written in Python. # any version >= 3.10 + +## Library Dependencies +To run this project, there are dependencies required which are in lf-demos/llm/requirements.txt file. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. +While newer versions of other dependencies may work, the specific versions listed below have been tested and are recommended for optimal performance. +It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies. 
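+
+As a quick sanity check before launching the federates, a short Python snippet along these lines (a sketch, not part of this repository) can confirm which accelerator PyTorch sees and whether the optional bitsandbytes 4-bit path is available:
+
+```python
+# check_env.py -- hypothetical helper; run it inside the environment created above.
+import torch
+
+# Report the accelerator the agent scripts will pick up.
+if torch.cuda.is_available():
+    print("CUDA GPU:", torch.cuda.get_device_name(0))
+elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+    print("Apple MPS backend available (used by llm_b_m2.py)")
+else:
+    print("No GPU backend found; llm_a.py and llm_b.py require CUDA and will not run")
+
+# bitsandbytes is only needed for the 4-bit quantized CUDA path.
+try:
+    import bitsandbytes  # noqa: F401
+    print("bitsandbytes available: the 4-bit quantized path can be used")
+except ImportError:
+    print("bitsandbytes missing: the scripts fall back to full precision")
+```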
+ +## System Requirements + +To ensure optimal performance, the following hardware and software requirements are utilized. \ +**Note:** To replicate this model, you can use any equivalent hardware that meets the computational requirements. + +### Hardware Requirements +- **GPU**: NVIDIA RTX A6000 + +### Software Requirements +- **Python** (Ensure Python is installed) +- **CUDA Version**: 12.8 +- **NVIDIA-SMI**: For monitoring GPU performance and memory utilization + +### Model Dependencies +- **Pre-trained Models**: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) [meta-llama/Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) +**Note:** Please access and use the pre-trained models, authentication keys must be obtained from the [Hugging Face repository](https://huggingface.co/settings/tokens). Ensure you have a valid API token and configure authentication. + +Make sure the environment is properly configured to use CUDA for optimal GPU acceleration. + +# Files and directories in this repository + - **`llm_base_class_federate.lf`** - Contains the base reactors LlmA, LlmB and Judge. + - **`llm_game_federated.lf`** - Lingua Franca program that defines the quiz game as federated execution. + +# Execution Workflow + +### Step 1: +To compile this specify the RTI host by specifying an IP address here: +``` +federated reactor llm_game_federated at 10.xxx.xxx.xx { +} +``` + +Run the **`llm_game_federated.lf`**. + +**Note:** +- Ensure that you specify the correct file paths + +Run the following commands: + +``` +lfc src/federated_execution/llm_game_federated.lf +``` + +### Step 2: Run the binary file and input the quiz question +Run the following command: + +``` +cd fed-gen/llm_game_federated/ +``` + +In the first terminal run: +``` +./bin/RTI -n 3 + +``` +In the second terminal run: +``` +./bin/federate__j + +``` +In the third terminal run: +``` +./bin/federate__llma + +``` +In the fourth terminal run: +``` +./bin/federate__llmb + +``` + +The system will ask for entering the quiz question which is to be obtained from the keyboard input. + +Example output printed on the terminal: + +
+
+```
+--------------------------------------------------
+---- System clock resolution: 1 nsec
+---- Start execution on Fri Sep 19 10:46:31 2025 ---- plus 772215861 nanoseconds
+Enter the quiz question (or 'quit')
+What is the capital of South Korea?
+Query: What is the capital of South Korea?
+
+waiting...
+
+Winner: LLM-B | logical 1184 ms | physical 1184 ms
+Answer: Seoul.
+--------------------------------------------------
+```
+
+
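+Before debugging the full federated setup, it can help to exercise the agent functions directly in Python. The sketch below is not part of this repository; it assumes the file is placed next to llm_a.py and llm_b.py, that a valid Hugging Face token has been filled in, and that the machine has enough GPU memory to hold both models at once:
+
+```python
+# agent_smoke_test.py -- hypothetical helper for calling the agents outside Lingua Franca.
+import os
+import sys
+
+# Make the agent modules in this directory importable.
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+# Importing the modules loads the tokenizers and models, which can take a while.
+from llm_a import agent1
+from llm_b import agent2
+
+question = "What is the capital of South Korea?"
+print("LLM-A:", agent1(question))
+print("LLM-B:", agent2(question))
+```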
+ +# Contributors diff --git a/llm/src/federated_execution/llm_base_class_federate.lf b/llm/src/federated_execution/llm_base_class_federate.lf new file mode 100644 index 0000000..14412a2 --- /dev/null +++ b/llm/src/federated_execution/llm_base_class_federate.lf @@ -0,0 +1,244 @@ +target Python + +### Reactor for calling agent 1 +reactor LlmA { + state th + state running = False + state out_buffer = "" + state ready = False + + input user_in + physical action done + physical action notify_ready + output answer + output ready_out + + reaction(startup) -> notify_ready {= + import os, sys, importlib.util, threading, traceback + act = notify_ready + def _load(): + try: + here = os.path.dirname(__file__) + if here not in sys.path: sys.path.insert(0, here) + from llm_a import agent1 + act.schedule(1) + except Exception as e: + print("[LlmA] Preload failed:", e, flush=True) + traceback.print_exc() + threading.Thread(target=_load, daemon=True).start() + =} + + reaction(notify_ready) -> ready_out {= + self.ready = True + ready_out.set(True) + =} + + reaction(user_in) -> done {= + import threading + if not self.ready: return + if self.running: return + self.running = True + q = user_in.value + from llm_a import agent1 + def agentA(): + try: + self.out_buffer = agent1(q) + finally: + try: done.schedule(5) + except Exception as e: print("[LlmA] schedule failed:", e, flush=True) + self.th = threading.Thread(target=agentA, daemon=True) + self.th.start() + =} + + reaction(done) -> answer {= + self.running = False + answer.set(self.out_buffer) + =} +} + +### Reactor for calling agent 2 +reactor LlmB { + state th + state running = False + state out_buffer = "" + state ready = False + + input user_in + physical action done + physical action notify_ready + output answer + output ready_out + + reaction(startup) -> notify_ready {= + import os, sys, importlib.util, threading, traceback + act = notify_ready + def _load(): + try: + here = os.path.dirname(__file__) + if here not in sys.path: sys.path.insert(0, here) + from llm_b_jetson import agent2 + act.schedule(1) + except Exception as e: + print("[LlmB] Preload failed:", e, flush=True) + traceback.print_exc() + threading.Thread(target=_load, daemon=True).start() + =} + + reaction(notify_ready) -> ready_out {= + self.ready = True + ready_out.set(True) + =} + + reaction(user_in) -> done {= + import threading + if not self.ready: return + if self.running: return + self.running = True + q = user_in.value + from llm_b_jetson import agent2 + def agentB(): + try: + self.out_buffer = agent2(q) + finally: + try: done.schedule(5) + except Exception as e: print("[LlmB] schedule failed:", e, flush=True) + self.th = threading.Thread(target=agentB, daemon=True) + self.th.start() + =} + + reaction(done) -> answer {= + self.running = False + answer.set(self.out_buffer) + =} +} +###Judge reactor to determine which agent responds first + +reactor Judge { + state th + state reader_started = False + state terminate = False + state eof = False + state buffer = "" + state waiting = False + state logical_base_time = 0 + state physical_base_time = 0 + input ready_a + input ready_b + state a_ready = False + state b_ready = False + physical action line + physical action tick + logical action timeout(60 sec) + output ask + input llma + input llmb + output quit + + reaction(startup) {= + print("[Judge] Waiting for models to load", flush=True) + =} + + reaction(ready_a)->line {= + self.a_ready = True + if self.a_ready and self.b_ready and not self.reader_started: + import sys, threading + 
def reader(): + while not self.terminate: + s = input("Enter the quiz question (or 'quit')\n") + if s == "" or s.lower().strip() == "quit": + self.eof = True + try: line.schedule(0) + except Exception as e: print("[Judge] schedule EOF failed:", e, flush=True) + break + else: + self.buffer = s + try: line.schedule(1) + except Exception as e: + print("[Judge] schedule line failed:", e, flush=True) + break + self.reader_started = True + print("[Judge] Models ready. You can ask questions now.", flush=True) + self.th = threading.Thread(target=reader, daemon=True) + self.th.start() + =} + + reaction(ready_b)->line {= + self.b_ready = True + if self.a_ready and self.b_ready and not self.reader_started: + import sys, threading + def reader(): + while not self.terminate: + s = input("Enter the quiz question (or 'quit')\n") + if s == "" or s.lower().strip() == "quit": + self.eof = True + try: line.schedule(0) + except Exception as e: print("[Judge] schedule EOF failed:", e, flush=True) + break + else: + self.buffer = s + try: line.schedule(1) + except Exception as e: + print("[Judge] schedule line failed:", e, flush=True) + break + self.reader_started = True + print("[Judge] Models ready. You can ask questions now.", flush=True) + self.th = threading.Thread(target=reader, daemon=True) + self.th.start() + =} + + reaction(line) -> tick, ask, timeout, quit {= + if self.eof: + quit.set() + environment().sync_shutdown() + else: + self.waiting = True + self.logical_base_time = lf.time.logical_elapsed() + self.physical_base_time = lf.time.physical_elapsed() + timeout.schedule(0) + print(f"\n\n\nQuery: {self.buffer}\n", flush=True) + print("waiting...\n", flush=True) + tick.schedule(5) + =} + + reaction(tick) -> ask {= + ask.set(self.buffer) + =} + + reaction(llma) {= + if not self.waiting: return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + print(f"{llma.value}", flush=True) + =} + + reaction(llmb) {= + if not self.waiting: return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + print(f"{llmb.value}", flush=True) + =} + + reaction(timeout) {= + if not self.waiting: return + self.waiting = False + logical_now = lf.time.logical_elapsed() + physical_now = lf.time.physical_elapsed() + logical_ms = int((logical_now - self.logical_base_time) / 1000000) + physical_ms = int((physical_now - self.physical_base_time) / 1000000) + print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + =} + + reaction(shutdown) {= + self.terminate = True + if self.th and self.th.is_alive(): + self.th.join() + =} +} \ No newline at end of file diff --git a/llm/src/federated_execution/llm_game_federated.lf b/llm/src/federated_execution/llm_game_federated.lf new file mode 100644 index 0000000..7da44df --- /dev/null +++ b/llm/src/federated_execution/llm_game_federated.lf @@ -0,0 +1,29 @@ +### llm.py file needs to be in the same directory +target Python { keepalive: true, files: ["python_files/llm_a.py", 
"python_files/llm_b.py" ] } #"llm_b.py" + +import LlmA, LlmB, Judge from "llm_base_class_federate.lf" + +preamble {= + import threading + import time + from llm_a import agent1 + from llm_b_m2 import agent2 +=} + + +federated reactor llm_game_federated at 10.218.100.95 { + + j = new Judge() + llma = new LlmA() + llmb = new LlmB() + + j.ask -> llma.user_in + j.ask -> llmb.user_in + llma.answer -> j.llma + llmb.answer -> j.llmb + + llma.ready_out -> j.ready_a + llmb.ready_out -> j.ready_b + +} + diff --git a/llm/src/python_files/llm.py b/llm/src/python_files/llm.py new file mode 100644 index 0000000..93322f1 --- /dev/null +++ b/llm/src/python_files/llm.py @@ -0,0 +1,92 @@ +### Import Libraries +import transformers +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig +from torch import cuda, bfloat16 + +### Add Your hugging face token here +hf_auth = "Add your token here" + +### Model to be chosen to act as an agent +model_id = "meta-llama/Llama-2-7b-chat-hf" +model_id_2 = "meta-llama/Llama-2-70b-chat-hf" + +### To check if there is GPU and convert it into float 16 +has_cuda = torch.cuda.is_available() +dtype = torch.bfloat16 if has_cuda else torch.float32 + +### To convert the model into 4bit quantization +bnb_config = None +### if there is cuda then the model is converted to 4bit quantization +if has_cuda: + try: + import bitsandbytes as bnb + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=dtype, + ) + except Exception: + bnb_config = None + +### calling pre-trained tokenizer +tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, token=hf_auth, use_fast=True) +for tok in (tokenizer, tokenizer_2): + if tok.pad_token_id is None: + tok.pad_token = tok.eos_token + +### since both the models have same device map and using 4bit quantization for both +common = dict( + device_map="auto" if has_cuda else None, + dtype=dtype, + low_cpu_mem_usage=True, +) +if bnb_config is not None: + common["quantization_config"] = bnb_config + +### calling pre-trained model +model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_auth, **common) +model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, token=hf_auth, **common) +model.eval(); model_2.eval() + + + +### arguments for both the models +GEN_A = dict(max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id) +GEN_B = dict(max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer_2.eos_token_id, pad_token_id=tokenizer_2.pad_token_id) + +###to resturn only one line answers +def postprocess(text: str) -> str: + t = text.strip() + for sep in ["\n", ". 
", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + return t.strip().strip(":").strip() + +###Calling agent1 from .lf code +def agent1(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer(prompt, return_tensors="pt") + if has_cuda: inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = model.generate(**inputs, **GEN_A) + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer.decode(out[0][prompt_len:], skip_special_tokens=True) + return postprocess(result) + +###Calling agent2 from .lf code +def agent2(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer_2(prompt, return_tensors="pt") + if has_cuda: inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = model_2.generate(**inputs, **GEN_B) + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) + return postprocess(result) \ No newline at end of file diff --git a/llm/src/python_files/llm_a.py b/llm/src/python_files/llm_a.py new file mode 100644 index 0000000..15411cd --- /dev/null +++ b/llm/src/python_files/llm_a.py @@ -0,0 +1,77 @@ +# llm_a.py + +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +# <<< put your token here >>> +hf_auth = "add token here " + +# Model +model_id = "meta-llama/Llama-2-7b-chat-hf" + +# Require GPU +has_cuda = torch.cuda.is_available() +if not has_cuda: + raise RuntimeError("CUDA GPU required for this configuration.") +dtype = torch.bfloat16 if has_cuda else torch.float32 + +# 4-bit quantization +bnb_config = None +if has_cuda: + try: + import bitsandbytes as bnb + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=dtype, + ) + except Exception: + bnb_config = None + +# Tokenizer +tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) +if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + +# Shared kwargs +common = dict( + device_map="auto" if has_cuda else None, + dtype=dtype, + low_cpu_mem_usage=True, +) +if bnb_config is not None: + common["quantization_config"] = bnb_config + +# Model +model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_auth, **common) +model.eval() + +# Generation args +GEN_A = dict( + max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id +) + +# One-line postprocess +def postprocess(text: str) -> str: + t = text.strip() + for sep in ["\n", ". 
", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + return t.strip().strip(":").strip() + +# Agent 1 +def agent1(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer(prompt, return_tensors="pt") + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = model.generate(**inputs, **GEN_A) + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer.decode(out[0][prompt_len:], skip_special_tokens=True) + print(result) + return postprocess(result) \ No newline at end of file diff --git a/llm/src/python_files/llm_b.py b/llm/src/python_files/llm_b.py new file mode 100644 index 0000000..6acb7d9 --- /dev/null +++ b/llm/src/python_files/llm_b.py @@ -0,0 +1,78 @@ + +# llm_b.py + +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +# <<< put your token here >>> +hf_auth = "add token here" + +# Model +model_id_2 = "meta-llama/Llama-2-70b-chat-hf" + +# Require GPU +has_cuda = torch.cuda.is_available() +if not has_cuda: + raise RuntimeError("CUDA GPU required for this configuration.") +dtype = torch.bfloat16 if has_cuda else torch.float32 + +# 4-bit quantization +bnb_config = None +if has_cuda: + try: + import bitsandbytes as bnb + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=dtype, + ) + except Exception: + bnb_config = None + +# Tokenizer +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, token=hf_auth, use_fast=True) +if tokenizer_2.pad_token_id is None: + tokenizer_2.pad_token = tokenizer_2.eos_token + +# Shared kwargs +common = dict( + device_map="auto" if has_cuda else None, + dtype=dtype, + low_cpu_mem_usage=True, +) +if bnb_config is not None: + common["quantization_config"] = bnb_config + +# Model +model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, token=hf_auth, **common) +model_2.eval() + +# Generation args +GEN_B = dict( + max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer_2.eos_token_id, pad_token_id=tokenizer_2.pad_token_id +) + +# One-line postprocess +def postprocess(text: str) -> str: + t = text.strip() + for sep in ["\n", ". 
", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + return t.strip().strip(":").strip() + +# Agent 2 +def agent2(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer_2(prompt, return_tensors="pt") + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = model_2.generate(**inputs, **GEN_B) + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) + print(result) + return postprocess(result) \ No newline at end of file diff --git a/llm/src/python_files/llm_b_jetson.py b/llm/src/python_files/llm_b_jetson.py new file mode 100644 index 0000000..40461ed --- /dev/null +++ b/llm/src/python_files/llm_b_jetson.py @@ -0,0 +1,57 @@ +# import torch +# from transformers import AutoModelForCausalLM, AutoTokenizer + +# hf_auth = "" + +# model_id = "meta-llama/Llama-3.2-1B" + +# has_cuda = torch.cuda.is_available() +# device = torch.device("cuda" if has_cuda else "cpu") +# compute_dtype = torch.float16 if has_cuda else torch.float32 + +# common = dict( +# low_cpu_mem_usage=True, +# attn_implementation="eager", +# ) + +# tok_kwargs = dict(use_fast=True) +# if hf_auth: +# tok_kwargs["token"] = hf_auth + +# tokenizer = AutoTokenizer.from_pretrained(model_id, **tok_kwargs) +# if tokenizer.pad_token_id is None: +# tokenizer.pad_token = tokenizer.eos_token + +# mp_kwargs = dict(torch_dtype=compute_dtype, **common) +# if hf_auth: +# mp_kwargs["token"] = hf_auth + +# model = AutoModelForCausalLM.from_pretrained(model_id, **mp_kwargs) +# model.to(device) +# model.eval() + +# GEN = dict( +# max_new_tokens=64, +# do_sample=True, +# temperature=0.7, +# top_p=0.95, +# eos_token_id=tokenizer.eos_token_id, +# pad_token_id=tokenizer.pad_token_id, +# ) + +# def agent2(q: str) -> str: +# prompt = f"You are a concise Q&A assistant.\n\n{q}\n" +# inputs = tokenizer(prompt, return_tensors="pt").to(device) +# with torch.inference_mode(): +# out = model.generate(**inputs, **GEN) +# gen = out[0, inputs["input_ids"].shape[1]:] +# return tokenizer.decode(gen, skip_special_tokens=True).strip() + +# if __name__ == "__main__": +# question = "What is the capital of Japan?" 
+# print(agent2(question)) + + +def agent2(q: str) -> str: + + return "Hello this is jetson" \ No newline at end of file diff --git a/llm/src/python_files/llm_b_m2.py b/llm/src/python_files/llm_b_m2.py new file mode 100644 index 0000000..45bad45 --- /dev/null +++ b/llm/src/python_files/llm_b_m2.py @@ -0,0 +1,102 @@ +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +hf_auth = "add your token here" + +model_id_2 = "google/gemma-3-270m" + +has_cuda = torch.cuda.is_available() +has_mps = torch.backends.mps.is_available() + +if has_cuda: + device = torch.device("cuda") + compute_dtype = torch.float16 +elif has_mps: + device = torch.device("mps") + compute_dtype = torch.float32 +else: + device = torch.device("cpu") + compute_dtype = torch.float32 + + +common = dict( + low_cpu_mem_usage=True, + attn_implementation="eager", +) + +#4-bit on CUDA if the device has it +if has_cuda: + try: + import bitsandbytes as bnb + common["quantization_config"] = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=compute_dtype, + ) + common["device_map"] = "auto" + except Exception: + print("[WARN] bitsandbytes not available; using full-precision fp16 on CUDA.", flush=True) + common["device_map"] = "auto" +else: + common["device_map"] = None + +# Tokenizer +tok_kwargs = dict(use_fast=True) +if hf_auth: + tok_kwargs["token"] = hf_auth +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, **tok_kwargs) +if tokenizer_2.pad_token_id is None: + tokenizer_2.pad_token = tokenizer_2.eos_token + +# Model +mp_kwargs = dict(dtype=compute_dtype, **common) +if hf_auth: + mp_kwargs["token"] = hf_auth + +model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, **mp_kwargs) +if not has_cuda: + model_2.to(device) +model_2.eval() + +# Greedy decoding +GEN_B = dict( + max_new_tokens=32, + do_sample=True, + eos_token_id=tokenizer_2.eos_token_id, + pad_token_id=tokenizer_2.pad_token_id, +) + +def postprocess(text: str) -> str: + t = text.strip() + for sep in ["\n", ". ", " "]: + i = t.find(sep) + if i > 0: + t = t[:i] + break + return t.strip().strip(":").strip() + +def agent2(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer_2(prompt, return_tensors="pt") + + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + elif has_mps: + inputs = {k: v.to("mps") for k, v in inputs.items()} + else: + inputs = {k: v.to("cpu") for k, v in inputs.items()} + + with torch.inference_mode(): + out = model_2.generate(**inputs, **GEN_B) + + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) + print(result) + return postprocess(result) + +# def main(): +# agent2("what is AI?") + +# if __name__ == "__main__": +# main() \ No newline at end of file From cd83f0aa01f04dc2da46d75791a0625debc9cd39 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Tue, 14 Oct 2025 20:03:59 -0700 Subject: [PATCH 21/54] Updated the correct links for federated_execution and requirements in README.md --- llm/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llm/README.md b/llm/README.md index f7a0214..1c595d2 100644 --- a/llm/README.md +++ b/llm/README.md @@ -1,5 +1,5 @@ # LLM Demo - +[Federated execution](src/federated_execution/) - For federated execution of this demo. # Overview This is a quiz-style game between two LLM agents. 
For each user question typed at the keyboard, both agents answer in parallel. The Judge announces whichever answer arrives first (or a timeout if neither responds within 60 sec), and prints per-question elapsed logical and physical times. @@ -8,7 +8,7 @@ This is a quiz-style game between two LLM agents. For each user question typed a You need Python installed, as llm.py is written in Python. ## Library Dependencies -To run this project, there are dependencies required which are in lf-demos/llm/requirements.txt file. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. +To run this project, there are dependencies required which are in [requirements.txt](requirements.txt) file. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. While newer versions of other dependencies may work, the specific versions listed below have been tested and are recommended for optimal performance. It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies. From 6b8c458fa45bb08c96ab113ec43547d044a44d39 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Tue, 14 Oct 2025 20:05:00 -0700 Subject: [PATCH 22/54] Updated the requirements.txt for README.md --- llm/src/federated_execution/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/src/federated_execution/README.md b/llm/src/federated_execution/README.md index 7ce4fff..0055843 100644 --- a/llm/src/federated_execution/README.md +++ b/llm/src/federated_execution/README.md @@ -8,7 +8,7 @@ This is a quiz-style game between two LLM agents using federated execution. For You need Python installed, as llm_a.py, llm_b.py, llm_b_m2.py and llm_b_jetson.py are written in Python. # any version >= 3.10 ## Library Dependencies -To run this project, there are dependencies required which are in lf-demos/llm/requirements.txt file. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. +To run this project, there are dependencies required which are in [requirements.txt](requirements.txt) file. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. While newer versions of other dependencies may work, the specific versions listed below have been tested and are recommended for optimal performance. It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies. 
From abd32edca2f7994c19bae6af209611314d9e579a Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Tue, 14 Oct 2025 21:25:56 -0700 Subject: [PATCH 23/54] changed the llm_b import statement --- llm/src/federated_execution/llm_base_class_federate.lf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llm/src/federated_execution/llm_base_class_federate.lf b/llm/src/federated_execution/llm_base_class_federate.lf index 14412a2..57171ed 100644 --- a/llm/src/federated_execution/llm_base_class_federate.lf +++ b/llm/src/federated_execution/llm_base_class_federate.lf @@ -76,7 +76,7 @@ reactor LlmB { try: here = os.path.dirname(__file__) if here not in sys.path: sys.path.insert(0, here) - from llm_b_jetson import agent2 + from llm_b import agent2 act.schedule(1) except Exception as e: print("[LlmB] Preload failed:", e, flush=True) @@ -95,7 +95,7 @@ reactor LlmB { if self.running: return self.running = True q = user_in.value - from llm_b_jetson import agent2 + from llm_b import agent2 def agentB(): try: self.out_buffer = agent2(q) From 27d356173e0177b46929bb819f9dff8a3c1b7b3d Mon Sep 17 00:00:00 2001 From: Hokeun Kim Date: Wed, 15 Oct 2025 13:35:18 -0700 Subject: [PATCH 24/54] Rename directories and remove unnecessary files --- llm/src/{ => agents}/llm.py | 0 llm/src/{ => agents}/llm_a.py | 0 llm/src/{ => agents}/llm_b.py | 0 llm/src/{ => agents}/llm_b_jetson.py | 0 llm/src/{ => agents}/llm_b_m2.py | 0 .../README.md | 0 .../llm_base_class_federate.lf | 0 .../llm_game_federated.lf | 0 llm/src/llm_base_class_federate.lf | 244 ------------------ llm/src/llm_game_federated.lf | 29 --- llm/src/python_files/llm.py | 92 ------- llm/src/python_files/llm_a.py | 77 ------ llm/src/python_files/llm_b.py | 78 ------ llm/src/python_files/llm_b_jetson.py | 57 ---- llm/src/python_files/llm_b_m2.py | 102 -------- 15 files changed, 679 deletions(-) rename llm/src/{ => agents}/llm.py (100%) rename llm/src/{ => agents}/llm_a.py (100%) rename llm/src/{ => agents}/llm_b.py (100%) rename llm/src/{ => agents}/llm_b_jetson.py (100%) rename llm/src/{ => agents}/llm_b_m2.py (100%) rename llm/src/{federated_execution => federated}/README.md (100%) rename llm/src/{federated_execution => federated}/llm_base_class_federate.lf (100%) rename llm/src/{federated_execution => federated}/llm_game_federated.lf (100%) delete mode 100644 llm/src/llm_base_class_federate.lf delete mode 100644 llm/src/llm_game_federated.lf delete mode 100644 llm/src/python_files/llm.py delete mode 100644 llm/src/python_files/llm_a.py delete mode 100644 llm/src/python_files/llm_b.py delete mode 100644 llm/src/python_files/llm_b_jetson.py delete mode 100644 llm/src/python_files/llm_b_m2.py diff --git a/llm/src/llm.py b/llm/src/agents/llm.py similarity index 100% rename from llm/src/llm.py rename to llm/src/agents/llm.py diff --git a/llm/src/llm_a.py b/llm/src/agents/llm_a.py similarity index 100% rename from llm/src/llm_a.py rename to llm/src/agents/llm_a.py diff --git a/llm/src/llm_b.py b/llm/src/agents/llm_b.py similarity index 100% rename from llm/src/llm_b.py rename to llm/src/agents/llm_b.py diff --git a/llm/src/llm_b_jetson.py b/llm/src/agents/llm_b_jetson.py similarity index 100% rename from llm/src/llm_b_jetson.py rename to llm/src/agents/llm_b_jetson.py diff --git a/llm/src/llm_b_m2.py b/llm/src/agents/llm_b_m2.py similarity index 100% rename from llm/src/llm_b_m2.py rename to llm/src/agents/llm_b_m2.py diff --git a/llm/src/federated_execution/README.md b/llm/src/federated/README.md similarity index 100% rename from 
llm/src/federated_execution/README.md rename to llm/src/federated/README.md diff --git a/llm/src/federated_execution/llm_base_class_federate.lf b/llm/src/federated/llm_base_class_federate.lf similarity index 100% rename from llm/src/federated_execution/llm_base_class_federate.lf rename to llm/src/federated/llm_base_class_federate.lf diff --git a/llm/src/federated_execution/llm_game_federated.lf b/llm/src/federated/llm_game_federated.lf similarity index 100% rename from llm/src/federated_execution/llm_game_federated.lf rename to llm/src/federated/llm_game_federated.lf diff --git a/llm/src/llm_base_class_federate.lf b/llm/src/llm_base_class_federate.lf deleted file mode 100644 index 14412a2..0000000 --- a/llm/src/llm_base_class_federate.lf +++ /dev/null @@ -1,244 +0,0 @@ -target Python - -### Reactor for calling agent 1 -reactor LlmA { - state th - state running = False - state out_buffer = "" - state ready = False - - input user_in - physical action done - physical action notify_ready - output answer - output ready_out - - reaction(startup) -> notify_ready {= - import os, sys, importlib.util, threading, traceback - act = notify_ready - def _load(): - try: - here = os.path.dirname(__file__) - if here not in sys.path: sys.path.insert(0, here) - from llm_a import agent1 - act.schedule(1) - except Exception as e: - print("[LlmA] Preload failed:", e, flush=True) - traceback.print_exc() - threading.Thread(target=_load, daemon=True).start() - =} - - reaction(notify_ready) -> ready_out {= - self.ready = True - ready_out.set(True) - =} - - reaction(user_in) -> done {= - import threading - if not self.ready: return - if self.running: return - self.running = True - q = user_in.value - from llm_a import agent1 - def agentA(): - try: - self.out_buffer = agent1(q) - finally: - try: done.schedule(5) - except Exception as e: print("[LlmA] schedule failed:", e, flush=True) - self.th = threading.Thread(target=agentA, daemon=True) - self.th.start() - =} - - reaction(done) -> answer {= - self.running = False - answer.set(self.out_buffer) - =} -} - -### Reactor for calling agent 2 -reactor LlmB { - state th - state running = False - state out_buffer = "" - state ready = False - - input user_in - physical action done - physical action notify_ready - output answer - output ready_out - - reaction(startup) -> notify_ready {= - import os, sys, importlib.util, threading, traceback - act = notify_ready - def _load(): - try: - here = os.path.dirname(__file__) - if here not in sys.path: sys.path.insert(0, here) - from llm_b_jetson import agent2 - act.schedule(1) - except Exception as e: - print("[LlmB] Preload failed:", e, flush=True) - traceback.print_exc() - threading.Thread(target=_load, daemon=True).start() - =} - - reaction(notify_ready) -> ready_out {= - self.ready = True - ready_out.set(True) - =} - - reaction(user_in) -> done {= - import threading - if not self.ready: return - if self.running: return - self.running = True - q = user_in.value - from llm_b_jetson import agent2 - def agentB(): - try: - self.out_buffer = agent2(q) - finally: - try: done.schedule(5) - except Exception as e: print("[LlmB] schedule failed:", e, flush=True) - self.th = threading.Thread(target=agentB, daemon=True) - self.th.start() - =} - - reaction(done) -> answer {= - self.running = False - answer.set(self.out_buffer) - =} -} -###Judge reactor to determine which agent responds first - -reactor Judge { - state th - state reader_started = False - state terminate = False - state eof = False - state buffer = "" - state waiting = False - 
state logical_base_time = 0 - state physical_base_time = 0 - input ready_a - input ready_b - state a_ready = False - state b_ready = False - physical action line - physical action tick - logical action timeout(60 sec) - output ask - input llma - input llmb - output quit - - reaction(startup) {= - print("[Judge] Waiting for models to load", flush=True) - =} - - reaction(ready_a)->line {= - self.a_ready = True - if self.a_ready and self.b_ready and not self.reader_started: - import sys, threading - def reader(): - while not self.terminate: - s = input("Enter the quiz question (or 'quit')\n") - if s == "" or s.lower().strip() == "quit": - self.eof = True - try: line.schedule(0) - except Exception as e: print("[Judge] schedule EOF failed:", e, flush=True) - break - else: - self.buffer = s - try: line.schedule(1) - except Exception as e: - print("[Judge] schedule line failed:", e, flush=True) - break - self.reader_started = True - print("[Judge] Models ready. You can ask questions now.", flush=True) - self.th = threading.Thread(target=reader, daemon=True) - self.th.start() - =} - - reaction(ready_b)->line {= - self.b_ready = True - if self.a_ready and self.b_ready and not self.reader_started: - import sys, threading - def reader(): - while not self.terminate: - s = input("Enter the quiz question (or 'quit')\n") - if s == "" or s.lower().strip() == "quit": - self.eof = True - try: line.schedule(0) - except Exception as e: print("[Judge] schedule EOF failed:", e, flush=True) - break - else: - self.buffer = s - try: line.schedule(1) - except Exception as e: - print("[Judge] schedule line failed:", e, flush=True) - break - self.reader_started = True - print("[Judge] Models ready. You can ask questions now.", flush=True) - self.th = threading.Thread(target=reader, daemon=True) - self.th.start() - =} - - reaction(line) -> tick, ask, timeout, quit {= - if self.eof: - quit.set() - environment().sync_shutdown() - else: - self.waiting = True - self.logical_base_time = lf.time.logical_elapsed() - self.physical_base_time = lf.time.physical_elapsed() - timeout.schedule(0) - print(f"\n\n\nQuery: {self.buffer}\n", flush=True) - print("waiting...\n", flush=True) - tick.schedule(5) - =} - - reaction(tick) -> ask {= - ask.set(self.buffer) - =} - - reaction(llma) {= - if not self.waiting: return - self.waiting = False - logical_now = lf.time.logical_elapsed() - physical_now = lf.time.physical_elapsed() - logical_ms = int((logical_now - self.logical_base_time) / 1000000) - physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) - print(f"{llma.value}", flush=True) - =} - - reaction(llmb) {= - if not self.waiting: return - self.waiting = False - logical_now = lf.time.logical_elapsed() - physical_now = lf.time.physical_elapsed() - logical_ms = int((logical_now - self.logical_base_time) / 1000000) - physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) - print(f"{llmb.value}", flush=True) - =} - - reaction(timeout) {= - if not self.waiting: return - self.waiting = False - logical_now = lf.time.logical_elapsed() - physical_now = lf.time.physical_elapsed() - logical_ms = int((logical_now - self.logical_base_time) / 1000000) - physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) - =} - - 
reaction(shutdown) {= - self.terminate = True - if self.th and self.th.is_alive(): - self.th.join() - =} -} \ No newline at end of file diff --git a/llm/src/llm_game_federated.lf b/llm/src/llm_game_federated.lf deleted file mode 100644 index d2b745c..0000000 --- a/llm/src/llm_game_federated.lf +++ /dev/null @@ -1,29 +0,0 @@ -### llm.py file needs to be in the same directory -target Python { keepalive: true, files: ["llm_a.py", "llm_b_jetson.py" ] } #"llm_b.py" - -import LlmA, LlmB, Judge from "llm_base_class_federate.lf" - -preamble {= - import threading - import time - from llm_a import agent1 - from llm_b_m2 import agent2 -=} - - -federated reactor llm_game_federated at 10.155.241.175 { - - j = new Judge() - llma = new LlmA() - llmb = new LlmB() - - j.ask -> llma.user_in - j.ask -> llmb.user_in - llma.answer -> j.llma - llmb.answer -> j.llmb - - llma.ready_out -> j.ready_a - llmb.ready_out -> j.ready_b - -} - diff --git a/llm/src/python_files/llm.py b/llm/src/python_files/llm.py deleted file mode 100644 index 93322f1..0000000 --- a/llm/src/python_files/llm.py +++ /dev/null @@ -1,92 +0,0 @@ -### Import Libraries -import transformers -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig -from torch import cuda, bfloat16 - -### Add Your hugging face token here -hf_auth = "Add your token here" - -### Model to be chosen to act as an agent -model_id = "meta-llama/Llama-2-7b-chat-hf" -model_id_2 = "meta-llama/Llama-2-70b-chat-hf" - -### To check if there is GPU and convert it into float 16 -has_cuda = torch.cuda.is_available() -dtype = torch.bfloat16 if has_cuda else torch.float32 - -### To convert the model into 4bit quantization -bnb_config = None -### if there is cuda then the model is converted to 4bit quantization -if has_cuda: - try: - import bitsandbytes as bnb - bnb_config = BitsAndBytesConfig( - load_in_4bit=True, - bnb_4bit_quant_type="nf4", - bnb_4bit_use_double_quant=True, - bnb_4bit_compute_dtype=dtype, - ) - except Exception: - bnb_config = None - -### calling pre-trained tokenizer -tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) -tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, token=hf_auth, use_fast=True) -for tok in (tokenizer, tokenizer_2): - if tok.pad_token_id is None: - tok.pad_token = tok.eos_token - -### since both the models have same device map and using 4bit quantization for both -common = dict( - device_map="auto" if has_cuda else None, - dtype=dtype, - low_cpu_mem_usage=True, -) -if bnb_config is not None: - common["quantization_config"] = bnb_config - -### calling pre-trained model -model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_auth, **common) -model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, token=hf_auth, **common) -model.eval(); model_2.eval() - - - -### arguments for both the models -GEN_A = dict(max_new_tokens=24, do_sample=False, temperature=0.1, - eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id) -GEN_B = dict(max_new_tokens=24, do_sample=False, temperature=0.1, - eos_token_id=tokenizer_2.eos_token_id, pad_token_id=tokenizer_2.pad_token_id) - -###to resturn only one line answers -def postprocess(text: str) -> str: - t = text.strip() - for sep in ["\n", ". 
", " "]: - idx = t.find(sep) - if idx > 0: - t = t[:idx] - break - return t.strip().strip(":").strip() - -###Calling agent1 from .lf code -def agent1(q: str) -> str: - prompt = f"You are a concise Q&A assistant.\n\n{q}\n" - inputs = tokenizer(prompt, return_tensors="pt") - if has_cuda: inputs = {k: v.to("cuda") for k, v in inputs.items()} - with torch.no_grad(): - out = model.generate(**inputs, **GEN_A) - prompt_len = inputs["input_ids"].shape[1] - result = tokenizer.decode(out[0][prompt_len:], skip_special_tokens=True) - return postprocess(result) - -###Calling agent2 from .lf code -def agent2(q: str) -> str: - prompt = f"You are a concise Q&A assistant.\n\n{q}\n" - inputs = tokenizer_2(prompt, return_tensors="pt") - if has_cuda: inputs = {k: v.to("cuda") for k, v in inputs.items()} - with torch.no_grad(): - out = model_2.generate(**inputs, **GEN_B) - prompt_len = inputs["input_ids"].shape[1] - result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) - return postprocess(result) \ No newline at end of file diff --git a/llm/src/python_files/llm_a.py b/llm/src/python_files/llm_a.py deleted file mode 100644 index 15411cd..0000000 --- a/llm/src/python_files/llm_a.py +++ /dev/null @@ -1,77 +0,0 @@ -# llm_a.py - -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig - -# <<< put your token here >>> -hf_auth = "add token here " - -# Model -model_id = "meta-llama/Llama-2-7b-chat-hf" - -# Require GPU -has_cuda = torch.cuda.is_available() -if not has_cuda: - raise RuntimeError("CUDA GPU required for this configuration.") -dtype = torch.bfloat16 if has_cuda else torch.float32 - -# 4-bit quantization -bnb_config = None -if has_cuda: - try: - import bitsandbytes as bnb - bnb_config = BitsAndBytesConfig( - load_in_4bit=True, - bnb_4bit_quant_type="nf4", - bnb_4bit_use_double_quant=True, - bnb_4bit_compute_dtype=dtype, - ) - except Exception: - bnb_config = None - -# Tokenizer -tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) -if tokenizer.pad_token_id is None: - tokenizer.pad_token = tokenizer.eos_token - -# Shared kwargs -common = dict( - device_map="auto" if has_cuda else None, - dtype=dtype, - low_cpu_mem_usage=True, -) -if bnb_config is not None: - common["quantization_config"] = bnb_config - -# Model -model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_auth, **common) -model.eval() - -# Generation args -GEN_A = dict( - max_new_tokens=24, do_sample=False, temperature=0.1, - eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id -) - -# One-line postprocess -def postprocess(text: str) -> str: - t = text.strip() - for sep in ["\n", ". 
", " "]: - idx = t.find(sep) - if idx > 0: - t = t[:idx] - break - return t.strip().strip(":").strip() - -# Agent 1 -def agent1(q: str) -> str: - prompt = f"You are a concise Q&A assistant.\n\n{q}\n" - inputs = tokenizer(prompt, return_tensors="pt") - if has_cuda: - inputs = {k: v.to("cuda") for k, v in inputs.items()} - with torch.no_grad(): - out = model.generate(**inputs, **GEN_A) - prompt_len = inputs["input_ids"].shape[1] - result = tokenizer.decode(out[0][prompt_len:], skip_special_tokens=True) - print(result) - return postprocess(result) \ No newline at end of file diff --git a/llm/src/python_files/llm_b.py b/llm/src/python_files/llm_b.py deleted file mode 100644 index 6acb7d9..0000000 --- a/llm/src/python_files/llm_b.py +++ /dev/null @@ -1,78 +0,0 @@ - -# llm_b.py - -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig - -# <<< put your token here >>> -hf_auth = "add token here" - -# Model -model_id_2 = "meta-llama/Llama-2-70b-chat-hf" - -# Require GPU -has_cuda = torch.cuda.is_available() -if not has_cuda: - raise RuntimeError("CUDA GPU required for this configuration.") -dtype = torch.bfloat16 if has_cuda else torch.float32 - -# 4-bit quantization -bnb_config = None -if has_cuda: - try: - import bitsandbytes as bnb - bnb_config = BitsAndBytesConfig( - load_in_4bit=True, - bnb_4bit_quant_type="nf4", - bnb_4bit_use_double_quant=True, - bnb_4bit_compute_dtype=dtype, - ) - except Exception: - bnb_config = None - -# Tokenizer -tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, token=hf_auth, use_fast=True) -if tokenizer_2.pad_token_id is None: - tokenizer_2.pad_token = tokenizer_2.eos_token - -# Shared kwargs -common = dict( - device_map="auto" if has_cuda else None, - dtype=dtype, - low_cpu_mem_usage=True, -) -if bnb_config is not None: - common["quantization_config"] = bnb_config - -# Model -model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, token=hf_auth, **common) -model_2.eval() - -# Generation args -GEN_B = dict( - max_new_tokens=24, do_sample=False, temperature=0.1, - eos_token_id=tokenizer_2.eos_token_id, pad_token_id=tokenizer_2.pad_token_id -) - -# One-line postprocess -def postprocess(text: str) -> str: - t = text.strip() - for sep in ["\n", ". 
", " "]: - idx = t.find(sep) - if idx > 0: - t = t[:idx] - break - return t.strip().strip(":").strip() - -# Agent 2 -def agent2(q: str) -> str: - prompt = f"You are a concise Q&A assistant.\n\n{q}\n" - inputs = tokenizer_2(prompt, return_tensors="pt") - if has_cuda: - inputs = {k: v.to("cuda") for k, v in inputs.items()} - with torch.no_grad(): - out = model_2.generate(**inputs, **GEN_B) - prompt_len = inputs["input_ids"].shape[1] - result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) - print(result) - return postprocess(result) \ No newline at end of file diff --git a/llm/src/python_files/llm_b_jetson.py b/llm/src/python_files/llm_b_jetson.py deleted file mode 100644 index 40461ed..0000000 --- a/llm/src/python_files/llm_b_jetson.py +++ /dev/null @@ -1,57 +0,0 @@ -# import torch -# from transformers import AutoModelForCausalLM, AutoTokenizer - -# hf_auth = "" - -# model_id = "meta-llama/Llama-3.2-1B" - -# has_cuda = torch.cuda.is_available() -# device = torch.device("cuda" if has_cuda else "cpu") -# compute_dtype = torch.float16 if has_cuda else torch.float32 - -# common = dict( -# low_cpu_mem_usage=True, -# attn_implementation="eager", -# ) - -# tok_kwargs = dict(use_fast=True) -# if hf_auth: -# tok_kwargs["token"] = hf_auth - -# tokenizer = AutoTokenizer.from_pretrained(model_id, **tok_kwargs) -# if tokenizer.pad_token_id is None: -# tokenizer.pad_token = tokenizer.eos_token - -# mp_kwargs = dict(torch_dtype=compute_dtype, **common) -# if hf_auth: -# mp_kwargs["token"] = hf_auth - -# model = AutoModelForCausalLM.from_pretrained(model_id, **mp_kwargs) -# model.to(device) -# model.eval() - -# GEN = dict( -# max_new_tokens=64, -# do_sample=True, -# temperature=0.7, -# top_p=0.95, -# eos_token_id=tokenizer.eos_token_id, -# pad_token_id=tokenizer.pad_token_id, -# ) - -# def agent2(q: str) -> str: -# prompt = f"You are a concise Q&A assistant.\n\n{q}\n" -# inputs = tokenizer(prompt, return_tensors="pt").to(device) -# with torch.inference_mode(): -# out = model.generate(**inputs, **GEN) -# gen = out[0, inputs["input_ids"].shape[1]:] -# return tokenizer.decode(gen, skip_special_tokens=True).strip() - -# if __name__ == "__main__": -# question = "What is the capital of Japan?" 
-# print(agent2(question)) - - -def agent2(q: str) -> str: - - return "Hello this is jetson" \ No newline at end of file diff --git a/llm/src/python_files/llm_b_m2.py b/llm/src/python_files/llm_b_m2.py deleted file mode 100644 index 45bad45..0000000 --- a/llm/src/python_files/llm_b_m2.py +++ /dev/null @@ -1,102 +0,0 @@ -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig - -hf_auth = "add your token here" - -model_id_2 = "google/gemma-3-270m" - -has_cuda = torch.cuda.is_available() -has_mps = torch.backends.mps.is_available() - -if has_cuda: - device = torch.device("cuda") - compute_dtype = torch.float16 -elif has_mps: - device = torch.device("mps") - compute_dtype = torch.float32 -else: - device = torch.device("cpu") - compute_dtype = torch.float32 - - -common = dict( - low_cpu_mem_usage=True, - attn_implementation="eager", -) - -#4-bit on CUDA if the device has it -if has_cuda: - try: - import bitsandbytes as bnb - common["quantization_config"] = BitsAndBytesConfig( - load_in_4bit=True, - bnb_4bit_quant_type="nf4", - bnb_4bit_use_double_quant=True, - bnb_4bit_compute_dtype=compute_dtype, - ) - common["device_map"] = "auto" - except Exception: - print("[WARN] bitsandbytes not available; using full-precision fp16 on CUDA.", flush=True) - common["device_map"] = "auto" -else: - common["device_map"] = None - -# Tokenizer -tok_kwargs = dict(use_fast=True) -if hf_auth: - tok_kwargs["token"] = hf_auth -tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, **tok_kwargs) -if tokenizer_2.pad_token_id is None: - tokenizer_2.pad_token = tokenizer_2.eos_token - -# Model -mp_kwargs = dict(dtype=compute_dtype, **common) -if hf_auth: - mp_kwargs["token"] = hf_auth - -model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, **mp_kwargs) -if not has_cuda: - model_2.to(device) -model_2.eval() - -# Greedy decoding -GEN_B = dict( - max_new_tokens=32, - do_sample=True, - eos_token_id=tokenizer_2.eos_token_id, - pad_token_id=tokenizer_2.pad_token_id, -) - -def postprocess(text: str) -> str: - t = text.strip() - for sep in ["\n", ". ", " "]: - i = t.find(sep) - if i > 0: - t = t[:i] - break - return t.strip().strip(":").strip() - -def agent2(q: str) -> str: - prompt = f"You are a concise Q&A assistant.\n\n{q}\n" - inputs = tokenizer_2(prompt, return_tensors="pt") - - if has_cuda: - inputs = {k: v.to("cuda") for k, v in inputs.items()} - elif has_mps: - inputs = {k: v.to("mps") for k, v in inputs.items()} - else: - inputs = {k: v.to("cpu") for k, v in inputs.items()} - - with torch.inference_mode(): - out = model_2.generate(**inputs, **GEN_B) - - prompt_len = inputs["input_ids"].shape[1] - result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) - print(result) - return postprocess(result) - -# def main(): -# agent2("what is AI?") - -# if __name__ == "__main__": -# main() \ No newline at end of file From 04f195a150e19556db83cb3d72c5afcfad437761 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Thu, 16 Oct 2025 14:51:13 -0700 Subject: [PATCH 25/54] Added more instruction on how to execute this demo README.md --- llm/README.md | 59 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 52 insertions(+), 7 deletions(-) diff --git a/llm/README.md b/llm/README.md index 1c595d2..5ad31df 100644 --- a/llm/README.md +++ b/llm/README.md @@ -10,7 +10,55 @@ You need Python installed, as llm.py is written in Python. 
## Library Dependencies
To run this project, there are dependencies required which are in [requirements.txt](requirements.txt) file. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. While newer versions of other dependencies may work, the specific versions listed below have been tested and are recommended for optimal performance.
-It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies.
+It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies. \
+To create a virtual environment, follow the steps below.
+
+### Step 1: Creating the environment
+Replace `<env_name>` with the name you want for the environment:
+```
+python3 -m venv <env_name>
+source <env_name>/bin/activate
+```
+or
+```
+conda create -n <env_name>
+conda activate <env_name>
+```
+### Step 2: Installing the required packages
+Check if pip is installed:
+```
+pip --version
+```
+If it is not installed:
+```
+python -m pip install --upgrade pip
+```
+Run this command to install the packages from the [requirements.txt](requirements.txt) file:
+```
+pip install -r requirements.txt
+```
+To install torch:
+
+1. For devices without a GPU:
+```
+pip install torch torchvision
+```
+2. For devices with a GPU:
+   To check the CUDA version, run this command:
+   ```
+   nvidia-smi
+   ```
+   Look for the line "CUDA Version" as shown in the image:
+   ![CUDA version reported by nvidia-smi](img/cudaversion.png)
+
+   With the correct version, install PyTorch from [PyTorch](https://pytorch.org/get-started/locally/) by selecting the correct OS and compute platform, as shown in the image below for a Linux system with CUDA version 12.8:
+   ![PyTorch installation selector](img/pytorch.png)
+
+### Step 3: Model Dependencies
+- **Pre-trained models used in agents/llm.py**: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), [meta-llama/Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) \
+**Note:** Follow the steps below to obtain an access and authentication token for the Hugging Face models.
+1. Create a user access token by following the steps in the official documentation: [User access tokens](https://huggingface.co/docs/hub/en/security-tokens)
+2. Log in using the Hugging Face CLI by running `huggingface-cli login`. Please refer to the official documentation for step-by-step instructions: [HuggingFace CLI](https://huggingface.co/docs/huggingface_hub/en/guides/cli)
+3. If you are using the Llama models for the first time, you must request access to them. Open these links and apply for access: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), [meta-llama/Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)

## System Requirements

To ensure optimal performance, the following hardware and software requirements must be met:
**Note:** To replicate this model, you can use any equivalent hardware that meets the computational requirements.

### Hardware Requirements
+The demo was tested with the following hardware setup.
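+As an optional sanity check (this assumes the torch build from Step 2 is already installed; it is not required by the demo itself), you can confirm that PyTorch detects a CUDA device before running anything:
+```
+import torch
+
+# True only if a CUDA-capable GPU and a matching driver are visible to PyTorch
+print("CUDA available:", torch.cuda.is_available())
+
+# Name of the first visible device, e.g. "NVIDIA RTX A6000" on the tested machine
+if torch.cuda.is_available():
+    print("Device:", torch.cuda.get_device_name(0))
+```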
- **GPU**: NVIDIA RTX A6000 ### Software Requirements -- **Python** (Ensure Python is installed) +- **OS**: Linux +- **Python** - **CUDA Version**: 12.8 -- **NVIDIA-SMI**: For monitoring GPU performance and memory utilization - -### Model Dependencies -- **Pre-trained Models**: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) [meta-llama/Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) -**Note:** Please access and use the pre-trained models, authentication keys must be obtained from the [Hugging Face repository](https://huggingface.co/settings/tokens). Ensure you have a valid API token and configure authentication. Make sure the environment is properly configured to use CUDA for optimal GPU acceleration. From 15075fb34d7e34cbb43bcb4a214bbacb84fb61ed Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 16 Oct 2025 14:53:23 -0700 Subject: [PATCH 26/54] changed the path file names for the python files --- llm/src/federated/llm_game_federated.lf | 2 +- llm/src/llm_quiz_game.lf | 176 +----------------------- 2 files changed, 2 insertions(+), 176 deletions(-) diff --git a/llm/src/federated/llm_game_federated.lf b/llm/src/federated/llm_game_federated.lf index 7da44df..6f4f1a8 100644 --- a/llm/src/federated/llm_game_federated.lf +++ b/llm/src/federated/llm_game_federated.lf @@ -1,5 +1,5 @@ ### llm.py file needs to be in the same directory -target Python { keepalive: true, files: ["python_files/llm_a.py", "python_files/llm_b.py" ] } #"llm_b.py" +target Python { keepalive: true, files: ["agents/llm_a.py", "agents/llm_b.py" ] } #"llm_b.py" import LlmA, LlmB, Judge from "llm_base_class_federate.lf" diff --git a/llm/src/llm_quiz_game.lf b/llm/src/llm_quiz_game.lf index 7ba9d6b..62fab15 100644 --- a/llm/src/llm_quiz_game.lf +++ b/llm/src/llm_quiz_game.lf @@ -1,5 +1,5 @@ ### llm.py file needs to be in the same directory -target Python { keepalive: true, files: ["llm.py"] } +target Python { keepalive: true, files: ["agents/llm.py"] } import KeyboardInput from "llm_base_class.lf" import LlmA from "llm_base_class.lf" @@ -26,177 +26,3 @@ main reactor { } -// def keyboard_prompt(reactor, action): - // while True: - // time.sleep(5) - // action.schedule(None) - -// ### Reactor for handling user keyboard input -// reactor KeyboardInput { -// state th -// state terminate = False -// state eof = False -// state buffer = "" - -// physical action line -// output prompt -// output quit - -// reaction(startup) -> line {= -// def reader(): -// while not self.terminate: - -// s = input("Enter the quiz question\n") -// if s == "": -// self.eof = True -// line.schedule(0) -// break -// elif s.lower().strip() == "quit": -// self.eof = True -// line.schedule(0) -// break -// else: -// self.buffer = s -// line.schedule(1) -// self.th = threading.Thread(target=reader, daemon=True) -// self.th.start() -// =} - -// reaction(line) -> prompt, quit {= -// if self.eof: -// quit.set() -// environment().sync_shutdown() -// else: -// prompt.set(self.buffer) -// =} - -// reaction(shutdown) {= -// self.terminate = True -// if self.th and self.th.is_alive(): -// self.th.join() -// =} -// } - -// ### Reactor for calling agent 1 -// reactor LlmA { -// state th -// state running = False -// state out_buffer = "" - -// input user_in -// physical action done -// output answer - - -// reaction(user_in) -> done {= -// if self.running: -// return -// self.running = True -// query = user_in.value -// def agentA(): -// try: -// self.out_buffer = agent1(query) -// finally: -// 
done.schedule(1) -// self.th = threading.Thread(target=agentA, daemon=True) -// self.th.start() -// =} - -// reaction(done) -> answer {= -// self.running = False -// answer.set(self.out_buffer) -// =} -// } - - -// ### Reactor for calling agent 2 -// reactor LlmB { -// state th -// state running = False -// state out_buffer = "" -// input user_in -// output answer - -// physical action done - -// reaction(user_in)->done{= -// if self.running: -// return -// self.running = True -// query = user_in.value -// def agentB(): -// try: -// self.out_buffer = agent2(query) -// finally: -// done.schedule(1) -// self.th = threading.Thread(target=agentB, daemon=True) -// self.th.start() -// =} - -// reaction(done)->answer{= -// self.running = False -// answer.set(self.out_buffer) -// =} - -// } - -// ###Judge reactor to determine which agent responds first -// reactor Judge{ -// input query -// input llma -// input llmb -// output ask - -// state waiting = False -// state logical_base_time = 0 -// state physical_base_time = 0 -// state winner = "" - -// logical action timeout(60 sec) - -// reaction(query) -> timeout, ask {= -// self.waiting = True -// self.winner = "" -// self.logical_base_time = lf.time.logical_elapsed() -// self.physical_base_time = lf.time.physical_elapsed() -// timeout.schedule(0) -// print(f"\n\n\nQuery: {query.value}\n") -// print("waiting...\n") -// ask.set(query.value) -// =} - -// reaction(llma) {= -// if not self.waiting: -// return -// self.waiting = False -// logical_now = lf.time.logical_elapsed() -// physical_now = lf.time.physical_elapsed() -// logical_ms = int((logical_now - self.logical_base_time) / 1000000) -// physical_ms = int((physical_now - self.physical_base_time) / 1000000) -// print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms") -// print(f"{llma.value}") -// =} - -// reaction(llmb) {= -// if not self.waiting: -// return -// self.waiting = False -// logical_now = lf.time.logical_elapsed() -// physical_now = lf.time.physical_elapsed() -// logical_ms = int((logical_now - self.logical_base_time) / 1000000) -// physical_ms = int((physical_now - self.physical_base_time) / 1000000) -// print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms") -// print(f"{llmb.value}") -// =} - -// reaction(timeout) {= -// if not self.waiting: -// return -// self.waiting = False -// logical_now = lf.time.logical_elapsed() -// physical_now = lf.time.physical_elapsed() -// logical_ms = int((logical_now - self.logical_base_time) / 1000000) -// physical_ms = int((physical_now - self.physical_base_time) / 1000000) -// print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms") -// =} -// } From 105cecfe2bd5dd64748a863c222df83884ac769f Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 16 Oct 2025 14:57:41 -0700 Subject: [PATCH 27/54] Added the images folder for README.md --- llm/img/cudaversion.png | Bin 0 -> 81271 bytes llm/img/pytorch.png | Bin 0 -> 53985 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 llm/img/cudaversion.png create mode 100644 llm/img/pytorch.png diff --git a/llm/img/cudaversion.png b/llm/img/cudaversion.png new file mode 100644 index 0000000000000000000000000000000000000000..2b7e87408d1f0d3b5f0cf09abfef396886f7b21e GIT binary patch literal 81271 zcmdqJWk6iXwl)lepaFur2XEZn0))mI_uv+SyN6)GgS)%C1PvV|Snv)(gIj>$4)11W z&bjBFJM;JZ@j=tQd)F>qRjZy_>sf?Dl;tqcNYLQm;4tK0y;OsPLo9=XLy$&60`7cP zlrx5dgMV!!Ee(;EmZpL@J6PJhwSa?TjC1=qs(_%1*B5K?Y7;7<_Y%E;1`(Y!0p|Y_ 
zla-Zvh^3l_48entJL7zJm12Y7_eJPe{Q?t3N#O58)&XI@U|MQ4kVUdo^thZFM+Su7 z3_70k4?}n#Wg>@Y?n5ii)>9q?5=|`a?na}%7}r_sD{z)Q%NWuhK`JtZzymYFAy8ux zUkx2+>aFU%^Nr|{Hhdc^QGYm8$AZWAS3Oofkbe8m%IUXne6i6|;9KDC*`|*(i$D z@~o}IT98K}TYWZ1_z1J)z^O}4e$3}(zQXO_7r2WJ(T3k8p8`_e?mnNR?s<}AV9Y+0 z4%lJapC|9Iqg+2^+3HXq|A!hss|kN)`b{-t{%;)Q5p)0%3iTZV4VcpExZWk_c|)_j z_oD}iGQY1Te^mIG^0O0fD6vw#O?3YG7w%<=0Bed85k2WdiGThLOl7b|@iP!NG5F`# zCoh97%4D0S#?8M!tG_-()+`vz%tu!=4*ucvz_0v&_yD&wbVYQiI_uDPCRBOfkHBA^4gK>q zBXKFULtpU9vuEEgb8OXt7d?niBUJ+4ZiD3^)9b_du~>B+uwG4e1sbn1K_RG!Q77dX z@}5m24MDl93=WZXV(3U9Ip~*hMFh)(n1KMiiJ1iSU)rog3A~wWZ`9R9Am$Uuzc}8% z!(1Ii4AltAh<5Qq-;66wxD1C0Yw%xxgnFOx*8z8A1AZNDk5!2w%Pbo<==bQA$KAr` zgmdkAa@RmU2E=UxMm54uz#L!4Zvw)Vo`BWJGyF_msBAALiBAIAr^w3Dx&HP({53e78GP25YV@ zK4;TFN0H4APE>r*@54!J=)1g0NpRJM%!j;hU44st3%r1RYLoROC*#Uf+9KOgp-S}4 zcFUcmg_KRy~-@EYMA`KNtA(5;3d(=K?yZ{rv(;s%Bj2HET*v4L))C zKK%(7Ucw*+EzKa-BJ$!6fUCUeFK=A7uT1DKpI8V^HS2ml_$1EM;^4}X9!-gF-V?z( zmr?nw9r0xTYvJp?7OJ*0PL^VY>KwLz8wpVgSyVt}+;%3&?3}3k&!hcb);XHsgD^*N zwHgEO{X~ec5A=0&Br+;F!V;u~s^|>-@)BaWBgOt)#q;JlVgqE3G8&?W`Zxw3;H@;sE~wHc;Kn zb}rM&GMygOwYWf=c3nguk>2t^`e#tItmwPVU3mi7lW##C%_=LvmYJCTWad*pdmA=n zvf~1ZUbpagh(!-|azOuX@D0?>?AXquY&q_?n+7D!wSo)}%O`+~+yEfEH(zTYL5@w` zXIGyuP{9H)cwNWFPvk~J*7FK?U!fH|w`8sZJk$o4ad_7S3l&m+K^9F`}i$y^C>>SP4}oKfdTW^)HH9GAgCa%0!UPeaQu& z%_I2)I5Od0R-_UdlvV$;Ss!pADZh!GZuyh*SSGyUg2)wouInF_wwIVx@&#EQxR*2B z-QJl7Tu`X-;3WJdyAcp>ZId6lVm!%z35jZ=&dIQ0j7rhO^xPT)dW%l?=q2ku;s z^8&TsBa#_s3W|7|Z)Z@OZ2=+L8_>U*BnkZH2JE5|2*X`hp-$8kViUY`uvKny>rfE1 zkZ^}wN*9AJm=w((NWQz+9)WH>2T!f#{DWr%pnAV53H><~yq-$DfMy2FvR6PS2s;;-TgcsV8ys5_hmWdji(yil4Jp#_o?CHx6bz?J`Ad+o6V>^dMto8epz` zIE>ON%@YJJTGKTjl)phf?}wmyQwQ_eoN0OlCfidV4;|1No)^>8P5T%?X*7h^t&qcx z_TymV+c|JDuIs+5MSAMyTC4#&RRt>)qI!Iv9><$EO!t6~{T-4fbqVc7KRq>mUkocb zf^>_v6wI+jmoD+I%iojG0ICjW;0K(HMWhMtFI`sTbBrN8r?0WHKX4ym^)+~dF8j9F zOA}8OHO-RhvUO>AXYVdxH}Lzw?ns8(1$WJVn|tHTosZ$djGvL|YFm4fy7}u{*{>N z?(%Bk=Z0WebpqyvCDB1q@93oBpgnh8p@0QI(by6+5F&anXY`FDdAz*i63h-Dz91x1 zZhk2*#MpcO>*l~YHTKxGqoB>XzCdt6EL9Vb7xaQcsq3kp%jV2ys8?tUo@+J@RdBCN zgQbUCK*+S#q9-s=*{8mHj}s&h9)jIKT?j4~W;|pHri;)hy;N-TndQgIvt@}T<_9uXc8)cU-xUHo02XEZJ?od#VZ3yX;29w&{qIvx)F0a%W|xg{R} z`?y2&Z3-ivTEH=>2b2oVFiyo3ScR4)L#yZW0n+N`cYfc&esFi&P@)lx+dBBNWPczN zLDqA@Ql+hFN(Gid5Y9#Rs28%pbI@lq(u&dEFob_*m1lWSJu)aIXn|c}-SIr^kk>|; zum^n;xRJ~@xdYF<#ca|Ze`ti%V4-4Ld;yxy#w&@nvCKqeJU=cAVlOB#3o^4uFfqc{ zY-9qn9~;wMA#Uu3s69dw=OCc_7I9kFu61Rj8=u~n=y-i>X-J9ivK$V0k=}V< z>$r{jd^Y&ojSSp}Wwi6i`&3Vh+=g~;8^t8EWP8*_tA=$SEQx3>bh?X*4v9E>^9sG6N{c&Tw^^| zG7KBAFQLnR*S*dCjw}Y}W&%3w#@4b4rQSAslGe({w72NgczRf;%I)9!-6*V+SssiW zCmyJ}_K=>Jyq6W$_i|zHL$P|VgDgzE7L~*(*gTKC=1=*c!KX2~UUW~Tjqfpa^sCy5 zU{4c>tbTq7^|dR z_~yh#BhK6ywdQGt*N%LbigkomkG$3XHPHP_KU^RykocTPw_j2h`Xb7T9L2XcT3Epm zi!EBf#y-P-r#I3s!1T$HD#sV)q$@#EgsMZ&XYAXq^BguJ@aq9*|Kzq9sJ?yBAE+Hm z1rmyunip+z+@r;dw4=I*y<<<55G;(R4X6W6V$3`3PCES&hF&RCLMXlDK?j;~KuW+V zxCkJJsPW$N#~A8&L@Ax_csyS8%)(MNgx}+6@$m-^ojSj7O-u zJ_hr+=-GRP89T6kkQ4wd+Oyq2BJ6PmNHsxym6}-@${IA_3!X#nD=cCRmb;!67PW8GJp8wCcLkQNW&)bEeS$S_x zcY1Sc0NTMT`$m$nlA`_XI%Hm0b7|1STSbWj`))>($r7kG_M1hmg_>ErS_jSspPnzc zU7IH%VrJJ6K>$O3zEi- zSlcvFcFL2tuh~X8?ZB$?o;yN<6hCSr0*d@xu+p-v;||vD?ljg!E2otWx0Y%S(d9OE z2>Nw8#|<@DESOoSaAJ4Uox-ZT`}M;lszX_tZr^Ba!JxJ( z&bk6C^EiDtYgNck@j@dWU^CC0$5dMtAg`n+q>d3F#kT!mh$>$#-Q=)=P3mg)4oq0t zIO^tx9942RU*=S2WR-6PusLV1x$YhT^I+mYG?1?8V7-=3_ElJn4Xv5Lc#^t@V!OYC~IH78$K zdo0RST%hN-#bB(BT?MKunJ|*hbFOeqzYZemC(^O+{I8wRM0`!2yP~bE!NQ(pMvzSs zJx3#t96jLH!y`*M+U9)kAE$u5ZfVYh8jrE%5Mxw&M8A(pk(QSv$MY!}bBL+j`} zq&>>PX$L)4{-BQSnK^GMQM?0$L^9o&KK0~N7z?EL&y^_dEGyO)=Ad`uQ45tIy>@;& 
zpR}Sa&hS}C+UQ1->NjZ9prV0VK1{8WOjA={Omc&Ktt)1;zNTP?;m$~we*0$8*oULY zX3+UvkY6-vP`~3C>#sEX@Ld4;WqLx9N8NxDFb!Zit(>gvt`Zm$n>{6n)(d7NY7jYV zfGj90T)va`T$D;QZ48&5Xn9wPUES{6_DiE1Ek={yyU{8l?tQ8Oe0dLS4Yk5FM+Ip* zfQEQJmiMdPtx0V?dd7(r@0O=s$T>=X2aarC7;<$-9knu@>F=w}9`f}~3_DZlA-r!{ zTy4qr0zQ2s>PG<02D)OuK2a^*T}TUZ$g`r0q?scU`u1aRs2&DJaef9m~v!+A-9rQ=nQmtATi zj{<1|LtjMOnO-^jbhKF)8a1IdWi*w@q-!E8IXojSfYW*?_XN~OvNR5yR&nD@nu_ir zFAjr3;YL6+SQBVh4B79gH5*+gdzw7#U>a8M*fU}IGWvS*A{+@Ot1Ae1wz{%FlGW@rAwt09R)n!37O zr<<0*Y$1Hv8?Y&Nsr~kToC0yi;W*v46Vis+cBa&8+SmlCLU>ZN=pCDf_8y->N&VX* z#?U7tuDkPMFv1|VK+J?KZ9G6Hs;){1{&6{+!ihWDgowalwlMbXn9|;d`K1{%IUAU; zEHHOZYnx=e6M5%>aFfn!0Fth{gQV*MZI)8GMQc} z(AxQ^jW-<2hfuN<-;3~LMbT}^lKg?H9eS71Awxw1)AM|XU^QlIF4ngzIX)q9fy{da z6iVSj&0>n~^A6mWQ)O|;qtw0D_Q{*C`#BZeRq{BGC1Jp&_-Xy>L|Znd`~)}ljSTYd zua^QhpD$7^_++hs_O-wti2g%r*)1=ZF8RVLs04u*60uADqhy*3A&+8XPv>f0Sva1hZ!* z=PMPLTJ03$I;7N^D@ZvI_@QNfgCy}s_FPlc2BsJU(Na?__!YR14pOKy4WfxbgtcI= zV5+q#ISUUa^C@d0fd?NzRV?eEx)4z{XDzB-pP=rT7|tiGW#x{nu&HeZB4Anhu0#!Z zkFn40e7f$DGDIcGsnG^X&nG!HWr_31hECQZq|?H(<|NK$^3d7kIsTwm(cJHmvk#vB zQ0G>^O`w`}kJp53Wzl=e$tIbfW7!3YBjkG!C-&fc}g5B((=S4q1AHfm%XvW-F0 zXeK15E_;1AilR(To@enkdml}sHnu88p)oGG^ieFVguUz%W1$7U;C_2>AMkz2bS?Q3 zO{-pA(W>VINQ(3=_OB~cQZ;!;u2K{FU%jlg_lE8n5z!zY@&^6xEA{)PHZY9MfGy*q z=M{4Tp#7m$le!elD2V7$NMhgSuQ_8up>TOVcBR?2Px5Gj3y{67(xq3G1bQPJ7_>99 z9?Sh`U8WdG1>LoE=gn~RUS^8ysdcx$)7O9tAiMk+{$4GEb#;^ryX{Hcl&ugoeKWVY zSn6XoTuc{o9ofqn`UV7L#gYbwN+U*LvV?1SH+ww3m6IHkUY~NG6BsL7SbBH*$RS6N6|bye3lGq2Z;8stK7SuS*qQol-25Zd5 z=>5|nk$`h4^1!gSwAKbwkIf<#?kYB_n_-OVw+#)oQ;=u#)hH(4@mZp}b6cH+)?7He zEH4K;S>MCrF;j;ttX6RUF=-6^-t3bl&`rnA-Hqh;(6-2%^AD1F#6{H{r$lERB7sV{ zVL5?@{3M)$!Q3B=LqHwStX0(AhkXQ6BFnl%aJ@xIf$sCbT>I80dRkE~OsXOZ`6^OA ziJe%hcM^Rl)gCmk_`U89fLS%Drs_VDApMSAz109AK$@urd|cU1jL4@jpN?j;F^rx~RhD+Ja@(-OM%DcvMV zsD&B5o_19HnLFTrGaYW)FL@WH&{`H(D=fbXtQWXC15#I1b~Nw_Z!=)?DKZa&>~0NC zyzum=U_UD`smcKF@uz0?TBLzBJOFvwud<9@9FP!eC=K_!_bK$e(ZedMU*%r#N9};e z5%Hf`CmGN8-iEfzLO}28trE1g&6VA69%W~vvkmS_S*kRZO(F=-(#D^8f>F%@g(~|{ z6Pqd7O6jt^!p`Zll7{Y%qw=#Sf|NJpB}FWeMX0+HW8**kE!S*zyg`JR&Ot7H`oq!c z(Oma<{<%a~CdElEO3+Y@!pSpf8q*Q)kJY z&Pbsw6@49(MO|gvH^eUjp_cs^gCJR+o2PnSeTI8lD?5bdI0@9d9p5%0^LwVw>K)O{a zy^vDh`CWEO0N8RJLW+$02`)=7m|tPDe$j!jQt=rK?Q|8U%<(%*!pPAau?p^>rOKtDZvmPz zrrv_FdA;d^L7Tx*YnZ(F`@de(Tn2>ew z8`vu@OZ!ZX;HS+WB!y6X5)!<==3rC;78WnFPwPV)q4A_Og{8$+7f18K=w{UPc@^o% z@E5!+<}#|b`P^X`^>{X|Y}6icFn@N(W|YFpT>JQ%_pgV?lr#huZ}vdRmwN)}ou7e8il);`$VTw6;mg?oz^*)PK`(h5$mJ_GhE7YMxs zw0l9xCXunca6XPzRDGb0gE)AOkh2!Ku$%n|(}wF8yQe3*b?>f{*Ar=Jzk@JiZ(wQh zJ&;7-9_aUtrC7E-E<=7Dp8*?U7BJ~GAv3hP5FgORcYM^~>H-$&G*94Yp+oUta{I-X z)6Pd8tNFGda?us?1ZV(6J4NN(ch#R=r;fcrtHTMLlimAmQ@fChHdZ743ZQW5f+F^U z=KB<~0s2{#4l+hSen!C47J!RADgLy4*%+)WPc$MxPDK->rk!pNWh;$lT~*>4)3AkT z27sU%rR&f^^cnQ*HpJ504j~fRA6TAZ{|lVQwCsRKeR8-DlPr_0`j}Qr)L#`)4nK!2APbj3hig zw4b-b%`G9VFF6ZXl`fF$A@FYq19Go3g{U#BTu?xLheAyom!;gnd3*-qP5kd+)DUY86^sDRtdAZ#6?rZxjd#4j8YGh~`yCZwVhEaj!I3`|%7S!yB~ty14egK;Di&+SvxcMj@XtzI{e<-4=g+8ZK7tnVy{oh@ zY+k480$$&lJ1i^?Ti$irK~~n2?52o^GZww((K+iwyH}N+Wct@;X%&td7L#jYF9Ww%Q-pTX;8SUa-tG>FV%(X&`(Py!u~!F1r#xB$fBbDoxNq7~030hlrk_}Mp! 
z!fEaSibZr?R#kNBiXqs$IImw0rAi0P|2`lRw9ytE)aB8b{p zxEfJu6_z*&hTk%6had34!|BtxX49Cxipecmz4n zRNvtISvnoyHA~NwRPr8rCaK|q>}YhNebM|YfqeZH9q+qTs1h{3%3`mVn18~<&;wRS z=WnjFjno!-02_qy0y zfI-~nhdXX-YV)Cx2jlBSFYhBUd#9qFD)aF14`Bx!sU~nCu00Gb#!ETN+AA5?hMnK(<^$WAv9cvZzGf2w8;5z|&k|cqtr$=qnrxmwu^MmxRMj{zu zPTQR$sSf`n5FM-cLTz_NHLZ(&;|(n~A%r z+m+QxZ~qamjoOK*0TVHi<@Wf3p=H`DR?;JLRI`FSl9q2|7gncuIvzg458gU8O=JXJ zIH*>MVMHCh&U-pAS(elStbJQwaeC0aKkEmx((+lyUF;9S@zXe((D+J6^?fd#7@K|Qoy8U&f9`Z z8%IUcka?MV?cOqq2`x69pL1&88c?M<+KD>YPajGLc7?u*|zAPfM7l}(T}LNZ&GtdIu$#~`@4oQ zIoQ_=8n#~Aq?j5I0k#-A&aJ9f3GFN}dlsUE25@ng(pv=`h6GF^oxanbra3gGY3NcB zs@Fe6Pj3#|&H&08oTp@R_kPi5e6=4Zx-p9M#PSGvv7b&5n~; zFV5i`heVP>aWu8rS={SiTApHc5H=skv8-NZz&7ajNb($=mqrk`6x1x%XT}Bj^5zOB z@v%WA)~#1)DhFFpd7|x{YhXY(K@x{Xm&8c+9DOCHhrViI2k1tW4Du%ZYIckrZK9L6 z#${Uh8KZA2LS5i%^9)B;9Pd;Gdy=*~F@GU*xI$!cebefth7!I9#4OlkALJ!pWk+V_ zOY~{OC7Whzp@(Wq2a6qFwxeld7W zRAgU{>V9nePWqwuT#WK5L5*y&hS?_(linDKB4Nzv4=Kh=jVcn->E@V}$Uq!*Hs$ck zi^Gr-5JJ8kzgV#yFM`%&qdHJy90Xm_&W>$%D;ZN}|KMBkAhIh)2>JTUbJOY3L(GQ&Xh( zGq~dR%QoaRp+?$+hHF#|+BMnh9KB2)Q{|7|M|8tlby(n=sv{=+E+ahoXP&?VsiwZ} zg=)t5j*8nw?MaNH&Z){c0+%Wz{LXyj(-~GWEr-Txuc}NldKb||wj^fJGUh$!o;Y62 zVt-XOeqCksN1{J?;g0 z$W(EOnpPj?jAJk}+-{uG%?Zhag^ydQjAjosQ63+tetfIP&Fft2qE-H;wIn#(v>UL= zP9M|lB^jlMb6#d)wxXr$XaYpHA2e3>g>zxA_{d%+<|Xa0e~l8>A>((fwJ5b%h%q2I zjp4|~6!U5syDgjyd#hSw*xx?X1SuY1DyOpzT=z?uaBWx#{gjp?4d>pkXKEW$0m_$! z>d9H{*T`aI`G)NIwqlP^>=PPnyo10V#uld$YD~7td!}dYXYcI4LLu$0Vg6VA@Ua%t zT@Db4T3|o!W=p`Jw6z=)#Z++`h5fYrjcl)jlxoM2SiEUs*T{!oV72n4-r}vZ=m!sH z(npCJe?p#p#L`)`q!P#XK8Ji%5i`|}QnXsLnZJE~jh{iS`rwji&3CXLaZ`Kq@q0qJ z-q%|Mh=peW-AmZd8V;!kmUs$;+~Y^x65}kUzA}VSQ#VBAN;fLo!~*YQ{RDwS$?50m z!NX`5=pLZOR`ODBGGBFdsafwvJA(1{M)O}QlernBj%#sW(5o8~m-xsLx@V*2Ga8I| zKxiJK?E=lK1isVv#6;b~8EQx2wMqSD=IFBCNOo8Nz3D07p6E)KN|h1053F!YV8k7; z{s>__GhxC_5{JR%{LO;1BN?!-a3Rd8wBk;yl#2X_ZuAffZWVp2l;h-BjI)`ln%rR* z;k!c`6U+#I!r6|W=~B}_M(7?75L&&ysc}ZrHTEGD;Xaqw6SohgF>NmT+V89*l`-k>sa0Mfs7cF1=@7? zGsHaZmY|H|O~Xkh8;1u%h@VsGyLK4|3!A3?ZVVZftw*+!h4B2>A?Z`=`Ce-GX>1v0 ztQ{-2?iwa?isbE)?rcBC&#*h$Y5Zzx_Crxrn0?p)?0;BH`JAmizy?9!!x6(3ZmVL5 zk@x8azDMHmiUdE^4_CRc+~JeBE+bXi!&m?94I%Fe1O@whuK*2?1oN6R!Ff7jp5vGMOpqp2~P_! ziFDM4f4C$5sjW;T~v@)6|8)&yp#;~L~?jMO`m;lb3N%_HhzQb(! 
zUpL~wZhS{u^cU!QAd>}bZ|Lv16secxqKcsQWY^n?mDy!_tTBn9U~OtnB4D-Xy&-u3 zc6$9XWhI2hC+NwGp-+Be`vCg2Y;dejpdU+6+y>bL(j-^7td)-N$S8SF5}C@)-BO}+ zVJZU;%{36$P5pQi$$9Qeb`=Q2gzs4(*{_m$bypzTZkOBh@6puMD1;XdpLAT@6g>1T z)sB-WT(UAT(j6n}TH!8OF%|B|)3w~JP3SuH;Sfd!wZW$olg5_%gKY7siSIl3zJSXQ zcVYxSi4WwECgh^e4`uqyA7!8t#5##9yU(CBN`N{3xH0~N4|W4qJ`V`1wncby0D!2` z@&fdl#6n?U&Ss0;qJnDGX-JL*9QLobMBlXBuC(+(YjNty9Q)L0oEmZYoh+dY^?nk- zn+6-mS_J{9xC#@R520O4*LXC4GR9qWXPAgDPDMVDmoS?ntmO&;mzeDCP2TQxx0&QU z?2(l=qubVx=@x~Yt;ujmrR#wS`tV>hR5Y_y@RN5vji zO{kf{;YeJ&DYLdJYau;OrSMM5bN~8>{g$SHNVwnDvDFc^ z890VcdD#L#^AS<$h~uGg+!g(C&l(-u?ZyeU3^(nIo|w4jt!I)wYj~4tM}?9_hvw?T z!xEpz0X`~5D8|b5CExj0PB$klroHFXu6`;Z^t9n2GC+4^?RwE8DN3NfArA!`LApCQ& zXP>vA!(j95qHFhR1#K(Q7@zv8>B+ZBCBzgOBWKZT)fEo&&Rd~O6745bV0b>$%2RP|NJ-j>Cyf3ieRcrj{BC&SU9 zyyg7I@SF!zaSnQ^UU9zg91litN-SX)DW@v#1>$I4u@i>1DN&t;IT)Gwu%B17ccnEq z_zZ~5!^&>51ojIqsO4@Bmn{}+=u?tF?2Fs@><=+&*z8dZkqs#{M}6mEvzFJM#%2#m zh+oTwkB#_brJL?QaI+PeSoL~05^m>&wvgDk%Evuz*F$8M;-9)$8J^8Pm$0nFXKnOB zY0@<|37e%g)$(~j)P0B-{sfS?3)9yQxzrPm84dZ{vLyT!eC8tbvN=nw^RA1+u3o=d zwt^*VSYA6Gwv#q0L``YEsGuXLt(r`W(g z(L3f7+`5)*xGxF�XiX%x0BRZKP0ib#c7pQmf(+FTQdm0G9S$vgdfOueUjsJmM2e zhc&+j>hg9&hd{k~c_LTdZ&Wr~?#zS_gD*;L612WBX6ItVo7ETsA78&trmS8Bg2fG& z8>2icRB>f48`B=wySpS%9f^ z^;Swg;Z(N9w~wR9p?K>gk`(V?vd072zVZ+5L^67PuJ{}gJh+qmn$Vw{bMlKm#umze z>?erC4W(ijC}liimFN?oO;SFp<(PB7E;cD8cBc==Xppl>nwOhVQE*O)mfz*U?q*w; zYPqOo8LN&mgSZ)L!OD?-dt;h@GuXh~dA^lno(7&tSmyQ3B`r~FSCs`YRrj+0OX^8D@=vn~v<5;Le zsfj^%#l|Xbr>76{hGr4i*7d`WIths;Dcf6>DD>hFtwY>~<6Do5ggL&ZT9jBNZ$8=H z)m)|O$7jG&41|O%zLpqSdO2+3Cy9bsBxKsNMu`2VKM$X(a*rQGxhlcm4Q3zLgi)88 zHcWcFIAm+Xx!EM1E_LRwLXy8&{(O(z1^D5iz`0YsC=feAsh=TN2v5B?t4b+nd9?+n z#1D1ZIF8M*t-Z9VsB_1^g;gI}D)qDBkR>e{in5YWSN7crC>El=2h8`2m}l6dWXi%X z2;ss6vkQrsn58IQZQ1MZ;?S(*2t-FDSr~#=RpuBls5KZgI+uFyDuQ-NN&>lUV4~(U zqB`(Hl?ST*SHadWmCNsmwR*m zcbR=g-Pv=wdOcJviYDpMwztNt&Df$lnt-f7Q7Ojg#10r%$et9Cz3caVpLxDl$jbz^ ze2I>`7bx2Y16)%Dl52|_HnhtXSzA`krH-T2?!I?S^IobeA3xggI&I}pWQsbxS=EhQ za;owS&PcudTyhvW_SU*nL?L5MlZsH;ne@3zVL&y}v6j@S^yjJCGau$90he}nVj?uvY~O>z+;47N@U1Y{9nK#xe6?4B(0@$frAArLDYhaD(42*s;I@< zchgW(ZlG^sLiM%p#tg$C)GElDr^Q~pA@Vi*qY&M6a*b_UA^&){b4mXLHLBXpho~^7 zO0v$|-9cK})*J?!Eg7kswcu*gBbk`SIka34va@odx+Vx+t{J(|ev=Bq-ln`md^PaW zbrk?AR{Kj%1jPVQ(UbQNP|?_;-MGCZJc=hFZjp}3E`iDGC5GK9UHXGJk&)@iwZav2 zgW33)Sd6Cvfl+VSnpoXwv6FJOsX5TbC#BY9`GZD8txt8|*t~bw(wdUZ=;qrFJgrcn zWH)?ftcI=#tS|PAKD?&jFhWx`O=3MX7ygNum!BAosw)ug?3u61U+j3@W|G8?a4Uz< zBk^2xLyV3H9|WTvz_pqNVV6ClKQ#=zj^B|!L)hR6i3#6jvQ}$KccyaqAez+{_4?^w z&_%|&P9uTV9CIK**}c+Q)|89!!HJ8Oa>+BL)xn?sGu=igR#TU@mVMJ32ceu03E9Rq z%ECV3FWS275m%{UUMa>`0{4Tx`N#Zasz-`t^@Thrrm?hf)xYcx_noym@9q5<+s!aX zTOMzIRrwuCVDY}DSWB#;-N1fAVRM=F(rOx_Ag#jTd2Lxa8?L()N`Ks^rAZIo&*MqK zRqX4yfp}7!7SHT}9wK(vnL!&}=v|c~BSjz7jL5t?ZGX#(8ofM}kBK!sQ`iYs?KW@} zi1{Xuy>{K|tH|?&XqjY2ru!F)Y~Mz_tZF-es~m7Gsy^i~@ok8ZSLqKm1XR_sv-IOf%HBZpa<*&N!Q&ARDfmbf~+*F^u%UzcY45)uD-w{8vqbUJ!DB=G#_2Y=K5t-#O(T@w6Cem8so z9m7Aqjv?LP-;vfoUIf2I83aH_$|beZtUv$cw{Kz$ARmy!NtoGhKmPLwLkOTQ9z)!z z#bo~PoBs7z|Nrm-(iu%`#MKT!7eTBJc+|fE`m+;*5rIj@e?Db!`VjJ>qgD>3+e7|m zpWnI=Sp4%H1UFQUUIkY~tt5__NAq9xoi}_1Avdf6aia8pgMT;B3SNrHS%iS#nzx`u z_r}?*0w`;qya7}Pj*kmz$Pg61|LBU3$BO>Y6~|+&{71v=hc|R0zInL`cilooSK!O$ zQh3T$3E#4rQ>MbPP5j+6;vU;d40cn#!jtHqZ83d^eQh^q4 zp&<>)&ynK%a7FaSWqq8txIi#8u}#aq1lx^^Mp{$D+}@60t1>W?2{x?Ve4?a67q%*%hM28CBb8EG&9QP4`}1IWW$ zV2Uwz{U#@!ATP)wg97Nf)c(m20eZfND$%(WqQ3^8cmv^7lmQG%)Ox4}VA)P6WMq!! 
zVx|Yf|9`%mRZt^GXf0e@a6F`nKmK*7{P%m%?+2r9)e?61zd!qP$U$!|fsw~t48&(b z(O&`ByOCwCVf`e)#-+L)(==c8sOi_>c literal 0 HcmV?d00001 diff --git a/llm/img/pytorch.png b/llm/img/pytorch.png new file mode 100644 index 0000000000000000000000000000000000000000..3ecd8af36dec3b5b3fa1dda63a63fddfea0a874c GIT binary patch literal 53985 zcmeEuWmq0b(=HGQ?(XjH?(XjH?(V_e0|X}!B)Ge~1b24`5Ii`9Gue0d-96v_KEKY# zHP7|TGdF}5}%z2S|JOUd(6~f5Q~Y?>!O20RN82s8gWq1`75yN{3M9D*>k$s>w|@Y4it4`$9=FKtdO*ztnz@SR@Y1?7M)BX8^csIk@lwHtAw zeFwoy@p6hRX_PJGXKih2 zBtg`O8+-4sapkG+{f@eA94Ppl{TUKxD5g5hGI|ZBee0GY1DhhNMNKvG|aW6 zE#&1vXn=Dl5YPx~5OClO6!_o(A3!$A;UJK}Hwy3(%Ln_b^j%Z_yT8sseSY64tSTxk z4ScJbx|o|gxLP^7c~>D515GVjt7*Au$;m`8GkDoM{jLJS=fwk@+MBx> z6M5O&Ik@t8@ss><2M=)m`!XX5(I2aWF73Fp&tr5D^jaxtLk- zsEA4YSsnPrPh#cf=ETFu=;`Un;K|D1=wivp%+1Zs$i%|P!a@(+LGSAA;AZSa@8C-M zpGN*_N6g&S)WzD#&Dzm{=y$uuCXViI{3Im5AN1e9|Mb(`%lhvpIk^7$TEGi3{w`r; zW?*9cZ`(jszTa1Ql&rnX?X<$;J1_{r@WY`-%Uksr7eFZg!@B*8E4w z|E#I*YVIQHXb(KnP2g|I{8{;*g@0D$WBmQ*{}_q?4D%mXfpHdq;bZ)-%miRA^2#JZ zK!iY~#e~(oK!4;wc&SNZhAT=zAp~tZ3~n?k!MxTseFSqq2~$~Bkvvh*wSsjI;wft3 zg@|r%Z|5bG8VT%zu3z8yAz>t1x5GP?qL;;czj;44nL&z*R!M?}LInCh4=5!tYp|ak z-m7FnVE^+lf>NqI2>JJQq-pTd<@Z0&_C`}cpcDV^1E|6Jzcv0I$^Wm%N~G%cM~Pep z4J0A~)51qO@09r}eOiswNtUzK#uT^X+1Aunx8r2FJOO=;N*(&=`-}8K$po2rJZ>_> zt`3#2D&(yPX}V-oR3nX3QG1kTKU!+54jwF`F&P=$PZv{JEhooDl1O)H9h%aqRVB(a zt8??~Hen_*Scmn-Cpfq0T3rsVHOLmKb=%xB==55Y5D2)4jwbU`vbkMlqEN`cNT*OF zbGuq}8@^5E@E&x({hG6^Y5GW~&9eD~Nv}h;T;hFmm`tr&>@`l=AmrxO07odWZhN)8 zZFIx;kwJg;@=}K?K2^8LF6m%AeOS5AUv(mry~t^&@0>Qp>3F%;lqKNBi(0+xgSlpsuUPneUeg_jn|UiqB|X* z*Q4dk_4_n&@77pqIk=2Ti9!yU=lOc^w`10lMq%!aN>=idhuWsfY+0U6w+HdPq2^TC*Yy!dOD$1@6q@au6tL*1U>tH{ba#b z?r8Ag3UxojvQ26*8eP96;{8ska$y52X&#&1@^qsBGb}WUiRZ?GgieDMvL*La+*-3^`ooFGd5U1*uhX*STpsta^>=KW zs_iDe*Zb10j5r*2PV!qYN3YjMFFXAad;S{vdi=j0Ov|=4q<(((JXfA93E3C zma|^^MXOQ4mc!_|?i{h!ys1tE@lCJ8OZnH!jp9ulkMnLQcj3?DIoZ9Txc$2|$8H*P z!fw+nPRFEx*RG#FVGj%lgaRU43Dy#z5=US>`fE*{<8BY$8^=7=9gB_yf?az{kICL# ze@H1vO>8?8U6&H}r4(LOcEL1(|3vT1l|b<4mj8XF&V)w>%Xju+ME}hWZ}Y7#|2q3M zj;QG9*<+8wm%Eb%+QD0==Z+S#^%{>ep2z#Gz_?wIaSs()lLW{?bQyY_aVxgMD>&}J zUmbFpT+$M81eN!%ynw(x^)zJpcv0PTi)7HNb!z2p5TYsx;g>i!y*BJQwIZ4vR%i-` zW0QzQ!*Po0{T{C!%y)G@)IpUut4=HBvRYQH``Ds>HSF$kO^K;qlVNxW1IayYUu{Sd zS(+gUV5dYVy6J@^bUTZ_V5n#vPow9|2^W01EbMsu<$I-rqmQna8{`SUxk9I1*GAox zd!Vw)!lU2yc!+xwN2Bs3a=u(sQ6W#Dk}oaeR2Xule#%3B-r8}qW5sQC(EOEv>k_k{ zS234QrO`%Rm=Y;|rTOCMdeyetU1mJ)Cch7j1PwBx_Yn` zrXfLZ)x=3@JM{`@P_CYz4<07@j{}U4Z8$y=X)b<4T)* z`J;WPDISj&+~2fonLKZby@)}bU<5pK%07KD(nmW$PBp{ zX!QY)*JGWo4-BRFyzv*><#K~{3Pv_+?_3=s)uGSbn*=UvC=6S@OX` z(&cfcGv^gL?(si{v|1AB1<{6j4bdyk40Vq#(=q0p%F{{H0x{ljG4996d8{i{?<;&s)H z-Y==R$?fRpGMk1QZAB0UEUP;l&zc9v#{g5W{Mkdol@O`$$5%U3NVb=wi7dv`#mc!S zkuPRZ$+IH7#oOWddEavRb(0him#ERJi4D{!!x1C~F z_%LTf3dLNLE?qB2B4}eTA=63AiD>1>S!+nh-K5jSueh|K?{4W0I5Twg*52V98gXn_sI$&!soK{Cpjzz z9a|RAvjY{ITCgd$N$f37Jx$`*ra{VIetCPmX&dnu??t4Yrjp(jnvJ4VIo9|q?movC zR)q>Yi$)-%#A36Ky#2ZtyooP-w{zaUW<9L`f`53I%}xG?m)vp%HL%qu2ad|-^x3aq z=fivLI2`6Uen<8Eoex};P|0H6svR}(@Nt2^uuTHwb`IV4N0Q8(tHs_~9E_!MksmLf zdFR2TQaho)y+^<+g)eJGsj3=;XaH^Ty=Ty_h!a`pDsnwi2!B^5-oN|vSoIL91}xv% z9s-`A<)MHWItl)wIxmxJ;(XUx@gSwUE(!MhLuSp*r}MjSa}}7Qsrgv)l|-}+8mwvsWF1?nb)WCf z(h&t7loV`UOoTffm0Yg$9A+#rE)@NO!v%Kp0HGW4xGD)K=6=HR{f3Coqef9W?)wDC zyf~s>u7O>ICqq;sMn?7V_3knPaHB%Y~;9Hnp^IUnf>v@1dURfu+3=6})U>e)>zu$1UC2<6u z1ikmH5GZWD)qNL+!VNi1Jy)sAdAk^O{H5k-hkK$jhsbloJ^d7SZykZ)JJyxky=PkO zje42d;t@^p9QtXl-!*kR^&%QRy!HxO%tFU{8Rl)ESs)=Av4zc z1!B%~$6?^cm-&6KC!4SAdvT~so_Ow*hbzOrv0F@fi~*R$l{(^|sNa(Q6YG6o1Pf5N z%d)yNdGXpAh8nXklX!EE1NTAu}ZXyrn0$7rv%@!%T+<6uwwCvzqrWQ6 
zxIa8&fXfP`(SB{Qk$}~TtZz5erxyff#4_(zS==FM^i%8uhhZ{Sc-Kj!EkDlA_rx0Q z6k7Evv*@K#W!03Gm!id}qU)Y_h%N5#=?w5pC*Bthj8%94vfv5f5!_f=;0|_uI4Q4= z3;sZ62ZRakQ^Xixvg7>Fh|+NV_dpU3`^G=6g6-6+3=T6H*HA zg(=*qlBsTXV;DbiNob*b?%Cs)p}87D(qN==`tB6u5^;}Qr$ImU`gF^RkL3U4XR+z( zr7Gl1rBPAZR@HUxLokgEiin}rhn(iz2*wDtKBrx8k+HXfAJ%gc<%zK6%+wrw{T-|C z285OoO_eHdE8saNS5kIQ+H`!CdW=*9JtytK)Fm(gEqcYy zuY~?DN#2Uv89UZrMKNBF@U|GVei|$1@p>wBMAL6_KX2~nbNSv?)N<`Te{hx1aDSFt zv29(Xrkg#!#dh;(S>`$8_1GPdZXY|EH@v8ydgrym1P zwN6*564N%VC4@&>^ENmCBA-Gm`FFt(h6@1FjRFz>$sV0cgULQ8T5hr z=tUKX)N)I5=y*I^6y9zBP$=9Yjyl&9{Bha1FrSIol>09!E3ZkC1k5t@7?&OS^`L1iWK+&C9S}Soej~ShqRK)Tc5W)$ zjGv?J)!m8j-MYiCVT2>XRY7~3-)k7OERtX&ZH66}xru%apS9QTxNe#xIKw-&>W(9&*O_`2=175)>JNP+zJvkOGnVyA^9`>iS{BCfy9MzbKX zxnQd6_dlA++4IQrN+uPIJxy5X+kGF6FBU6AP08P07vsC-?c_F+#d_Kj&BWctaPRkK)8=PznvB*Nz6oWzUq1|9q8Mg?gXk696=p#2iA^dI{pMf4f`rD}P%=YG?-+Sye!1iLC{ zb6|z{!tT2lt?$4LX*T0ls)4_wIcgh%Mh9iC7vJZ>lny+-Z6o>6#!(%W#8g(b)ag(t zx*#O2ipwL}d@j-2urTn^ETnRnW`!5yPIv0|_7mlTWO>RFsN>%upx_5K17+V(^9bFS7NJW3-l=~2w?|Gh9cqmIAAEjJp$L*%j z;{$096*mzY+5E*|an`dHKQGZ{zV7(E4WS4l_~v}GGh&KoGMnGD=`%4xSZ~(7@UItU z4Vk=DE)J{%Qe?{WQKaX+0Hf$-Q$w&NYp9Q_ng-tD5urFFr}K>j=eJ1flr3YM9=fbI zU74v@FCpSsSMAW~R5MPDk|i)`x^Cgb3iud~N3JM_{+qd=1Btv4a+iL-r*i$guwe_j zl7Iur2VrVXsWIKV=XTuWJmHk(5iUU1WMJu?O`9tmTXHV1kQeW*v3J^VyY&3y3~yH_ zLJ6Uzg0wfM6Y~LqY(9jncB+dX^F?CMSBm&Q_D`{zb|hKM1m^|Qo$h!y!x zb2<06-QGClF?zjRK8ULl-gzBx;r`|N&!^e~Na^-|SU50|h@Wui zN4vk+^u0)G-U=8tGnFVDrs11(@%UAe7r=rcH*jlAr=YcOWE~v*3s{X;OdRI+h7(%p zO+htK2Wq^1hwOF+SQ?%mZKL=4MS>>ZYn?b%If+(PNZb7Im;F|o&m=@bO4@O5vA5ny zO*Dxg%s@-(^i{5Oz|vpFia+-tIK)&yy9STf)Q5ju7&-zRV$iI7@b%t~>={P9g*)MlJ|CS*qaea^J$Ehu{@$r**(j^nvcs!T>Ry{$zB(=sA4EE zTvMhe1bmCot?sia^k^oNd~72|<73+ptt3kSesaK8w!@SMugJTaOYmXXj3>` zFMLk3{izlo!s>%BE+%@ENe}8v4{n@#3wr@_%=JS*SA3yF9Lv>5DFRpaXnF-`Y)@nU ztKwx7-ipc^gtc&-MQ>E9YNv7WevIg*`p>ZgBwgS+#JOPN$Z3}?L&b7%qEk<=jc^B` z(ew{T-!e|iHWHl$?sR6XcaA+0Bsh3d5-T~nj$rXsvDu!PDvD~ncvBgv2(WB<`1neq zzTRR&8s09)(`zjmM|lJ=)*$K?&p-{|K!Cn$N#8|e=uxPR?sV06a*AC*yvS9pVBczG zsJ-#N<=(7fK02PWWXjQ;e7)K<(IS2JUu(2&*6~98bdRzHPNnw{LhAu6Ic8x)9U|JE zLs-10?Te3XCm0EdpnjZ}bh{hng=r>%F`a@?F_*DL1H7=c3_pJHa8ztZ>R&8f$TrJu zmTm>4@a2`;@^gd_3!89x<#-81EaDh;`L%K(?p5D&PlkBe>D60Qdgn2*%@!sIoHU{1 zGeD;f@I6CUrX2ifa(hyQp6RjiG0?SwP)iEe2sEUPvWRJ-DKCQU17MzAHJYH#BJDU zBge;6@u(0KL&D-*qDWXM#nVha$Y&BHo7y(YCkTL0p1j~9gim*8wsj_L zT%)Pb`-~Vi0k{G5$+_CSHyrLKZ0%kPbJ3Z;Aay3!wWj!^`{X zj<^d*!SG?fbJfXwswW7XAT`D{YjWx5#FD}6gXxIEPgq@RdRSc9as2UoOqVdpDW!K& zQST>mbhXjT-+1xCRgiBADc0Q%B8T()@&|}Vn?arSBzXMcreFmwuW$MFQ(5M#Y5ntg z7xndyCfXtJ%?VO3o$op#G zqz-rG6SLpdhAP!vnkFzaUmZ~^*(6qS5^KBh-`_UIN-%x2Gz@-E7Or0#JOqCixOgB>qfucLE+wS$iQAP4ZAb`}*V1HFP5T2Flr1(6vegSwJ3UT4mJ0-n z#WyWhK1h@E?Xu`vPG+;;L8?{p++sLpg%GJkEL&QB`-Q_P5B$K4arO#fz=JUB0Ee<> z>$jm-_xaKpi6}Q6#SZ)_^`v1qoNg!fp&GP2zw0QXIoF*&aXCNQT@p?^Eys$Q8W5f- z$SR))(GUU|aGcqi{62nPz;Weqo8B5K2)QEKk%@dy{E^rJF;8Hm7ly{0`*%mmc0ogt#t$LCz@* zEK{#|AW+6FY88(Og~tw2png zUN#1e+O$?4^WFJqN=dBwD2+cC_0ylpo8e z*D~U#XdKi$)_%nPV2Zsc?Dn(g8EjX+wIK&FwtQo00X9EuqB0pW8zkb1<`^n1a` zcZF#Mb3)KutKExvrx!bu6g>&9HQ0->g^cH#g)kTv9|Ghh+dYOmhns*Vec{!cro4R4J7~3>a#( zB}IY9dCI|Xc-+iQ8>;5bby;ND`^$dBEJC%s)GyocPW(T~7tbDYeR)2Gqwls|LI$y{ z)LSC5>P-TOFZFMr-#JJ09ZDP-Tpq|NHl3^$necR^Bq_MVh83T-@ZJ{y24%3NTQn{G zxR;3r6({+kE&M7~c7Vq@yL9zKAa_n84(DrK$|zY^FV|NE=-IGnbXut_4*Q>2;`&+B zUJ>oM$y&zC@ezdQRwmZGp66y&5T>QOh*N4|FzEYd&08$+Z`x%IuMc~Q*MLXTcGAZR zX?rgi^5#4!UZA{EB%7{>t25FI-k612vc$W`GQz4dyGyAnS7< z>;{~s*|{eS&-~@&GO<_oT9rA$*1=J(uSWOIYn%%FAWrb}%e*RLyMNXmPki_0QA_;J zdblNFt~wSe)|NRG&nlA)2ntdGD*7g?R)p)#ExzKn`U^gRqd#LzB4+U52G5shLp75- zx!}5yt~SMuSR}{ra5kUFPIFfIU{Oln)E0Op732iH1}SCEWag78k~N1 
zghduMz8tWa>PlFQDIj0X!`VL3uL8QnU}g&B*4MRSvqig?`<(Ho(yp>i2(>hvW9uzR zzj5`{To0IgOOl|ssEA4n7h;0ha#w)WAoT?A@pXTyC?j^L9lhgI!mMb8fJJlLvu++{ zmxt?bIcd$bcM=S|TRDN1)DToGC1ib^Fjf9TxQcaR7tx0p;*L_BWWwN60xG{c!E=XobXR1#N&{PNqXnBB4FJX_98B6O?fUa#vM5wP3qeu37F;j`k`Low-5x^geM4|+oPQ$;II1flT|@r zIGORrpLX80)ep;zgENm{F^plNCTvvsAa9}iC3C)Gt6xRxJ=Ci$3 z_}8&4k(x4)CJcV#o>yiH_V^8H?nY{ zXcaqD*IhbUu&=2->#eQ}eGm1{Uje{F*rxNFS$9ooh{sv zit4apxO%S0--I9P91!%ij{y;00i1B41HL9&I0NOxOBv zp;iq>4wn4Q`4J*Q%BOW9S|`ulgBJ$rClwdVP9@0{UtOA};RKI7bxVrWmGooSrE&bI z_3L4pq>u|`nYan7_Hokd?&0ph<4?u{KueNR08_I|NfAhp|GQUzvS^$pz)fZ+4P4nP z{($WM^y2_6|4qmFYsf}LKSbMKI~^(isWX4I z1~mR6_9415PpA&(@6P>6n+n<|78>O6Zn-J_cY^^WLW7HnMB4CwrBwgh8=-5Ye3LA` zb#JwQH3%w*oNsbUnOG72hu!~2C!~nH0rHhR!8sqzzZ(3I2Nd0+N@~i zq9;M>zZ&dO1d5)Z$;$tXQU45Re)ma=;P==^@FvLotHEtbpy(AmdEsx7|Mg;Ih@do0 zqy0IO|4EnqlUjC(3KRu^joSPVSph%)S6u%X23N0qpHXb<9h#@yO)ST71}Ihr3GllwK=?wN!tnku#o@N_$cy3 z6125}T(L8mvH_LYh_Ojb(Cs?Am~h|T&%RS|3o{3wq?wh25<;mJhUT9lm2G3bjR zQoGmSQ2SON`cLyG#FVT6c}+7)p&uX^mbnu zED9bTopl2-A(hMVEP1b?6ck~SP^+c1dv32cj8s}xCs~6H{t|Hk>6FaCz>bSg4`u;k zv^Jq$ekAxYYk>0-hh}Sw#q2_-c-`%T8bV_w?aa}XH+|;LoiF+L9^*(aM`Ndh;`g!q z#D?V8l)Qt1XU5SYRmyJ;$0zJb(=v@@^5=dijh+!=vA874&mNCeEL3r3EtPR!=nd5i zNGC@gi*EMUcUKDN$+w=rrzN78lj*$doe2~)wEd=yJXpr=d3k_pL@)1P ze==pWR{4&cP4v4ob{zdko#OYl z!5KcRnv?56dt|#)LfFmezNgh?wltZO%&x033ebqqj#u{7t!KY{WwZCls@DJJvtDx1 z^(v9;RhI%#LntTf)};VRxPhy5EVB6C*P|-E!H265Ch5c)F%6jWH@Y=7W@0A zOTcND0yvM&@>TBo?E<6=W$Lc<&$h+3OO{0dk7c$(yDyL~jX|Hr(Eqk^je5I3gHqAq zt3hX5gY_JRa+Y2c`1?$v(oeX0(9U6Oiyg2~S8Mms#mpBQb zyw-{h!L;ok+;J$r0W*Op9|a1(IL^wjGtJ`&pnVo;NnboF7eCJ)L-g@2U=~wg9t0*CCj_7up zU2Glq#Cp%at0Gy~6H-|+YJ)qE{c$V+atT^zYI7L8TWs5bCl;s~5ltJIOd-k&(V03_-- z)IHazpQ3qvI6+zCBf^9);P;8|tBi@BO5MIZNx66=772l^40WfnlLm}y{p9rI?XZGCPR!!N4tyGHwTncFn{A4aqTHGu5^7R`uZAT3u z+w1Gc7P^O$d7O)vH{*xHWg)bH1$CeUtI=32^B?4WuesIYjQaRcc;8s>T}4)8^4t@G zUrlS*a8Rs}LkHyPTGpJ6uk`ADQY-Y^CB{3fLd~d z;To0Y7NM|Z>j`W!^iA1W?9v5!=M2`g3qQ-2P4d{dU9cfm7u_6w!9@x9tISfPQZ;S+ z7;I#)HrOz#+0m2#PH1L>BaMAzwPaXn_e{|4bQyHf?l4PQcb3G>U<%0oqFLQ|cXq*y zzsYL(LJQDA*>ipk#$cwuu{&DpW$X;KyN#lF|G1*u=vcRye9`2vU$@c=f<$4W$wh2wC`B3P+ zc_Uo13Ec*^XF>r#ATa!hx7@7=Kpezzjh$@PyWlw-%Kd|>9`kiYvS#P&+%A{f!qf$4 z!~Z-}K#jS9QbVb@>7A(XvLlc>E*vsZO_Y>N2$|#W?@{ zrXyG31jlo?wwj%OD3>VMQo7JKxWCU17%Z7x4S+PHGIl+v3?<}kYrsH53q0OD^zTsEfHt0gchXE#_mHK+K%6Yz2 znZ@f;u&Q%=5f3v08kJ%U;OQ9s8cHCnKEgodJTCjd%1IU!qsxhvz9N^vM|rHC0XOz> zaB!#3#-8u>3H@bgzi;)x<@f;^wc96P6mR1w_)GbMQl0yo*sJs0`0-X(8Z54ctb{x0 z<$@<_S*OsX@8HW-wbhgOac-J7>tiBWu?^yjWS4U zaD$z8D=riRum&!JG3hn!pQsmu_5kN=eUWkg)A#-|@Q8?rtXrq@@Ij8R$!e|m*|30n zSqla3CTla(Cth5F6oo!pqF7Qjs@lj|+j>gc>h}nvT}`O2<+g`P1>_T-GV+J3)yvZ| z2Z%a}y6$s4H`O8T^ha}en~z6D{olQZOWbI`m)dA!J{M7P0rr8Im{?so|K5&3r&S{a zpu_m}gz@~&yJGyqKTjmo6c;OXDN`sFk*P}Qb(NPz>Gus4;}EY(TgDmgOoBp@iJ(Se2zb1+_~|bhy06~c1p+bJSU;dK&UbE0cFC+ z%L~a*SSzYq$B*8~RJnJ2wb(}&*pe}1>Pqo(1?NQwWa%P$%yn40VOXR2!8hnEi-wHt zrR5&Pt!}$o74piy9&O~i(W~vUt>#P&TbPTLk?OgBEPr@lQXJ5298#|6hp2hfLLeS?<0ofTRL=%cBq@}*EDWvXerj3 z9GF`b70OUr?N2qRb*r+KyXwoq;LNA8@&g(wJEW6H#2ez&4c9bMzDP{y1pDkJkET%I zj5{n@=5a<9WtJED?1o;g-r-c|WNO~Usxf8|@NYR$7f7M7+b+U2E*G;)*E@Bfkjo6Y zKt^2a7s;s4HHFN=(DJ}xe5Wc^Ey*q4fF>u9K%rU(c8YYn7)=7~Lp@9|(8A@(EY7La zA_V3-+oh`1vsK-t25Z$D=JKLicv$tBjV}N80J&&vFR3223N7Wl&M<{OAQG8r*`Npb zb)K!q#aZ8qf0KhwZNl(5>{|F<-7GDgAAi(O{oKn?4n&L3)Y<}+bX4E7k=guY{wpbC z=g&GFsdOB1Tv}H1F*7v%pJJN-*=mo;TuyfaPHs5hR=N-Qp8_1^TNYQnnDMJi398fWQn#M*TK$0b(LdOZTw74}7d zSY78;t5__d%_OPlsju2hg38jaYVP0!%X;F0o>_%`4GgJlJ$ktt>EwN|{wOtcVh_AD zhz#<2Or=mzaAS>n`6940%e;M6ZU#HEpo7EEP zqa~FKZ5rd8hdRfV8-&ElLb;~QXOS>D$c{=xXLhOC6235=gU6`*E7C1N#}} 
zKq|pCbA^ab?<#@H&rv)S_f~DC4NBqa2-wNQ2IXyFjRM3*I8bPG48d^Jb#ZBmPk9s zVYh*LPzIP{NQ^O`lwO1rm1yc|Lb2BbDUw(zM+eWczWE$=1OA!JP&)yOhDHlhzdE-L zZ_&D#ZwF%z*Gbb!-=RCjdv!ys=G!5jG8p3r#&cpwD!1bnUwn^Yww3<4XSey!?>tg% zGrOE+N3Va{`q*0@3_voDwt8z;kX4DKTPDstcvVj(A7tc8Ajb>%>X6LKz{6H?meYB( zf2nOC9+dIcAQ#T4Zwtrk8!COr%rGOxAr7F5Ew#`>%>7OrS5fL-_O1O9h#6bg012hP z%0-^lGG2MvXeWooJ6YLn{OhfID@~KqBAT3EE0(e`BkDJeU&Sk~h2Z5A|xokPNPS$tiPDx#_@w^8Y!hz5x+Wydd=kfTw zWz1FX>(OSTnG5#Vu-fWLo-!i`@jkJ6mNmDP6Iia2R zjiG3aXX>>RFIJq;mC9Ri_Nb^KzVidaB+kjQYTjYj-pAjKmuR#+;_AxbNlS!U0a;1J zdz=~R6>K!=$LG0MvEHQXPa9>p!|3I(@-K^%487R1kV^X2R`86Q{^}cAaQXT8OipEA z+EW+%Y2%Vm^7x@y(_|&*xhHOoYVq^&`p|J$AEEoU%nyNGPhzR;-$Et|Dkn}<5Y0h4 z|9N7|PafAc!9)JNPMN40;UJ}32sDV7rJ@=blz9cai%kZo;b4fxER#$QwxPW*aw~N# zCId_>UH(~7)?pBwDq^5lW+@j^=fZHaCSUlj_IlV%+ zWQkS1UA_7YtTx(W#~cQ@hXoyAENZk&kcrI}!LqM}^N;t5z-ZzdqLdMOHRR-6RZzh; z8*6D*deMCIcwLa&(E*PdFYwV~6&&VtU_24BV+rt^W<5n4*i@1tdiq9`z-{i+j;eH|N>{ z!kx)DAOqjHN@9#shtN;Iw4&@dZWR4RftPI^7DGW7Q_npDk+3Y+hM9mTGG~w}cCHxM zkHNBQgsd6eGz_e%;1~*CAG}fBibatHzd%>*qEJ-sW+_`-sXAGex z%RpImb`B!!g+QEUzuf_6PEIqtaf+?JmJVRYdC;jMkvetL0h>0cK$;XyqsxKJne@#j z>M=a4fQT>q4hjo3FN8Ew^Z;{L<%C4!7X^*{d6FTFy&n`|Komczrdy^Kn?)MHrNF!U zlCMNK{p|xZvaR;eLyN+do`=KUOPnZ?$*vPCpZxOk77hfaLxa*e6M-!NA!tVT1VYC_SG?&O*;1ZC(s zcC{S*Qf|ls{94(tIxd zkU6f5Q|9G*@@<$WxehBi=}H><-eiSyvy*3W;(WH?=3oB{07$v?EFkqPH(R|KdEuHV zhhMWhtYfh%*cn7fVt4dR8+`iQ54JFUb>=UfqMKD#fM={1hn&)GIpy&9+HA=7c%Xyo zYkMzhVcc?SzL2a=h}6P!B(c{b+HS;j*x+%JCwhn*_WlTnU%c7fj3nK_g^nS0wYvGywXo2) z6%fYR!@#AspHQJ%e?`c(k@nFYlC~*75(s{`x4CY`W0zniWeIbE+*FYi`%bEN9PDTWD2_Y(5y9nl~%b(dHMp-%i z8<$5Ur^JvD9-_(@DdWeB?iCfKxureF2d4>*EFsN(VC$gaFR299s3xPGa1jIu!p^TB zakD_ytrNO$I-=;|eJ`?TnHnSsGLjViZ@X+`*GAqbWJ@W$VBVY?@VYKA_R6x0WD!Rn zht(FIg5|)<_AUe*!$HPojySmsqe=>H`Qxwz#rgyu41$qTs;1742?bSSEMgmP#z57b zHO6U&h9{m*B$HjIyNB?xuWA7plRAl~xPp`k_R zQ{wj!)H0NxrHqRb|5nKuG#gc`Em<0dF~(wI!JKvu5EZ_93s$wPq|JoN-yBV)prrGU zDs9P=JF;9gI;qFYZmV;;9>%srw=^}{t*pjQom%_VK$O{5FtM;G=J06K&%a^Ay0qWj zk2sFKBk+bsH{;4>vku0lIc_}X?>v8BPS3(nSW$wfaqpnjM-6m!RFp_cU&6%GU;oH8 z14z4Yn{>9-3f9#xgjw`_2(lQ#H)LR)oD*;A+UyjZLG)nvKFqMp&eFS)Y>+%l)ig-; zJwGOTd)|7}J4Kd_G40KPlwt8fBlP)B9CsQior8G44zPA)hs)27)68H@2aU6}?1lvZ zAd2-?WHj9EWL|rZc{O9_!;jwITSZxaO#R;A3t$I3GI2aPU`5!{CDWqOPe`g@D`gty zv3oIgGg=XQBr}V~_!K=9pZd4~xO>RV8rXK88Q=e$@N(IIZxNX732ZOO$W_(jWFgj= z+X*?e(Tl0RVdcFxjHYy|ac6(aCS}1bC&v(7E=}mvn>P-~|1yXY=dPH-oh6NC-gF#5 zOg{eIiMJixVFF3Z;xiV{o=r#tC`QFfjPXI72CZ;*!G*9?Qu0Y!wl;&q+V`@hC4orS ziLati`2LT`t))?4tHcwJxxOeVx3-v`rrOF0g<;7+Wh9zX*F1F{;4w|c-r#ck+kx4b zAOq(4qX^Oca*;!c%e5Nc7DMx_bUg?JTF%ZTUP#egI<1pP0s>-9XXey9=e96dOlJGS z&~scvZn8ul$rOEe-yL#$Pra+MD97%HroHSLjNq&rNW$-laTkGR$vT70c^d%=t_)&^ z(P=*g)=QC1pQpCF4>s-v-1cDaF1l`t8*KdBaWrh!PPl*^^5r`q{nin|?xm5li>^C` z8t;hRf6;M0MtcoxVv+(FMZ2^Fw!CzmNrdkD-uxc!Jn^0_eZ`yRl8D6x_9@Mwf35G%rVB4>0nW;BUv_-PUD%a9gC;mPmLr?M?+vl ze8YJ(Dr#f+!-vFV*`Q=?=1@@Pq}=p)bZ5|Al!){N+wu5$^taz1Vs6t%kU zRV(SJ6JnU{&NcPw`&DoMF(3%NeAUe4!*oMZ+4Y|k=a;1?G8&u^LdS#qA|MGat$2Y0uvT30iH_0# zdC-7h1Ww5OXPH6&zuy}7x)<|a^Kmy9A~FQq%crP-bUfWz15~Ne5K>_?QmMfQ6hDN) zQ)h)_A@L?;-^gYbnIHjC_r-x5uU$CYNTzp1vgJU#2+45b*rv^&%F0lzhH- zk|%EwH~59KtB{ag_w)4C#>Pi@S$T7Q|H&knyOb5S>!>x1L7Eq5$tC&|L!KB7xVexg);_<*Gy;|&m-(t z1BjSj#|1A#HX;~7tO>YMJF^Y%f%^&se4h=c7jVXD@JO3svGM|MLXqLlLw$+2X=c;S zaPsyF$5?h4Zr@F37_R2_Ti|-b&kn?6xiJ*8iCa%O81E zU^BhvlEM+ekKjRW<*-5TQ4)g%FbYU0pNh(7YgUaix4N0B%EW0YY9*Qk0Y4O2v1Y@> z<%$S6xxB@# zD|M)=tL&3*rWt;2wAx2FUrWcV`9&QfxQ%LabDBd6uhO$=6SEIrGY9eDE9fR@MZB#f5QN%%{EIL-G+J6U%{3dm>ueYjITU*Obrz=xc_ir3eHlFiev^rFg zEUOsw#K&;%ef$=`7RTSJeM0&dS|m0><9AKiz3S$`&>Oea1>Z{<9jZup6{JWt2mcpo221P`h}``k{5>}0Tmzc 
zH-5)1q&UBORMUc(C2rXR5HeC?Vewi`z1O?xs70@i9}u#;rvz>G&m5}Cld*Z~z-}f& z0M!Q@1U4ps=jDVbY#d;MbsIKA+!c;*OU$1#2MGLGx>?i~krTCs{U$hU3*_u2EwV*w z^K`(2MsYA5d1?WFY0B{=H;U0>=M}sT^3K#Hd1tOkyeYhyv8AF|AO#*a#@w3;UW7gN zLoc^(b6GYFMa>`)4(ajlwSY}jyLYiDnuNR-loS&3E!3XR7QG7+U@e*xv&zr-7$?&U|yZRQY?2s_sM9C&LkeXmK=kKsgAMUpYy*Qf8X<7y2 zQzX&cP5i~=SNoIe@)BcDe}h2_%w53ENpcS-;HCmsG!|$UUZV*XwmW`kyE^TZ#|S>4 zZ+ZH5(Gj4sIr<9coA-Hte~dMhzyCXmkMX(a;Y)R^Jh`mk=E69AAa$0jFqHH{KAVo+wCk49 zvo4lN_ueFK>y*n9OKeezvwDb(Na01atIBiQmV?1=9$M!eGR+DFP>+~Zx~`4&alKQ? z797ZQ7)gsR>wkd3?p~POjf**_ZPXv@N0&+DRp zr&9mQ$B&FQxaqHdoGp2kEU(5L&QUiecz>?mK19{uh6~iFjK_9ZjEBpsRF#`VB%QbW zwY;yAXQoD~2#r8o;`XA{x>`m8*pcK0@3m*EC*2Tz3L)$NpkFF{7J?8{-m$* zwM&U=J`VS!JUIKU8E33PeU=l+XYBeNG89vqWzP#y|wgB~Lq`Y=B^>Bh~$?|lgOP zV3y-pT_;}8bzEr8>W}e+8pGq9PK)5BenCAWNr14hy=AqxK1)gClxDs{k)g6R;5DM7 zc~M(ePa_6XV7ANUO9+S=4}8`$!4m$->ZJbl*^?(F^j=1OiYBsT+H-D(5Y+a~Vx?Xa zEzN4cfDx=~=b$byW%V&3Jo5ciI)M-_iNS0#La|F545cziFVbq7 zs(r+%J%R8L<+HC;pm`H%_Jiq{5w^hzRZ2xkJiQCc53C7#)pb;2fnkz$Q@cYgZJWJI zSbajFddB& z!L%5r-%le82U|$j6Z#X5sj)Ao;%(|7ua<^iOMK1Ezz=HA zJz%UntSU1NQuSd|$w4e2L8-%ZhrN;=ayX4GoYY;iOt;H;0iHt~slt#PvJc$F1h|om z`ISf7szjM-wz+*ST(03Y305rs!&u<2I zWn~$}@@=^_B6X}!No7ij12<1eH+FekPCm!dYyT(`y*k-sPFJu$4tO^GPW8LC=PfC) z^&3Z%GM9W5X%!N$-T~6!SRp>ok1wc=ZA=Htmz$oTK&f%|U4~SQ1*yk>zM4Zpz%~FX zj!%vVrNx=Q*v=HU;*nOd@V|?G++f$@STX|egWl_0z`S^?sv4DIm`LX3mHAB4H2akc z;^bWsQ4B9lo0W1VND&~R_XXKoBq)6=ZDo(L?2Y3iSiI9 zUR!!HXRm*e-6n~Jc8kY(E7RyXd#PtjL6KUvnQFfE5UoTv0N73h6S-|LP89sX8g1uxR9btK+joD zvTSe^+vIv~>`+V1+m|{${Oya1S9$1ikxJRm^p=xYTNlkB-HBT;(rh_#v+O-P7^lkWk%V*TDte0c; za0sviqwG-{nIg05K6U;Uka>g?mm~Q_cgZw8w5*Kh!RpV>e0Jt(2g` zPp=q5R`U@f3@gdWM6hsnaSEXHDldMkH#nGAaO-tG-STc^k)kfy?FdmM1{Vs22wknh z3(!?r#$VSzzC`eSO_cwXG7^eWQM0bFWV>M8TN@|dux|Wzg#;dcO7tU zOn_>%Zyp-H>~a;pNf`e`j_kM2wT8Ej17=efNN9uIq@r8`g7X0n8V&?g0ZsaWa>h?dp{mh>df zN$7=mU_#o zEron0IRuN^MRC3Li|B$2dbd;8trnWC#axTlcvlsp?hJ2qvBMkK$RW-jv|^7@UHtPi zPS{LYK`wo~zT&po$grTdwKXxe5`9!4j3Ydas$qRiDkemIhO=ym<7G|M_b{Qah2i2s z{uE0~Zp?3!Yd%s620V=)ceQ7Y{|vZ=uo!9kcCz5?U^oiq1DdIVTPl-Br@H6pNs(CUuT?~pxaot&P2+QS zB^bYHy%c^gGYSXZ06e&(v57oNp7l z+_AQH{y!8a#WW=`s&qRTnTlyO(xWDEV7UuP62x9B5cEvCM`I5tCkOQQ8b7zcJ>9yJ zn(~bi+-K~}GaoPhGHu0Ki}>_DVis1DlI&DPC_`PngL|jGSH<-iaPtbK^w*+6JOJpS zRsi~II^In*ww3LYma)V7flanr`lQv`yY!mt1Z@vR<+)H z`!%zyrU$nmKQ0!$x9d>!-r0?Uz@zrt${2qwC1k-8;jy~%v16P^0li9Bqi}StIC*hs z(`HSN>T~2wK7_ii&i1k?c}{1l)&Z??N)IMSQ!;%>cv-Pu=}}6Hl@_OQiSIrhw~LUc zyx@$kdC7Lr(FceXKC*tl4QbG8)MSnQ^--;(I@>4?hg|}u_5>!LIaR``;%?oiA=IH% zQU%2o8UkzScx{|3D?u^Qidc3|E*j$Q$n0djGqs3Z^U~T7_>;o^xdXRL(^s}um*94s zIjy&lB$UyWR~y4;0Ysk(2r(dId?nRJYaHtgL!a544!&NB7aV#Aj-STE2I|lNTC9eY zso!y?RvdC2%}0bGN*QcURKCm}!zcIRz>Mv2l5X;dn+h*LeC`jP{~E8$L{ZjvwSHwf zm@QYc&6NX`16<0QzZi8bLX0tTeUWAazgjK-7bVLAAwnLBbF8OK>C3+%PjGP1iw`a$ zLEnEp_-|nFyD*T)J$VQZf&Z}~L;)y^uhEq2KSsoV;fzH0u>qP%hY3%V{>Oj9Un2nzq;wlZ45+6i4{QJ#md3*6k-WK-|_=l-jzuO;S{QH>YDDT&k z z6V?^`;|wSfDaL`sn!}NTkQkR_>I>wVNJOp};ltZTV!{Vnb||C^mmXIf{r`UyG6^6Y z6*~WK!Bh~56@Vas=UUjM_((WZgN=%+2alMIjV&mmeG3Yz)ZbklSGu1E1+q~CRzm_j zIsFYi2!vSq!0~UNnU7jz$QsYQ7PuZlZq^yf9GdL5d^J=mB&2@d)CuiQ8&t2q=FuLw zxhzV-KQk>n(%?scEO;X5b%n;~$&-txHERdcXC162)I)pYi?U!vl39aBO<5XsM>f~^ z=8dU+Hu8y1y8ws1zydLxbLpr*6A$^&}qa=Vp>G$39cYo~R za%a9A?jlE4t6xaBrulw6a58Qfvj#KdJ1-FR=yLuqQP1?~qfNOpJoZcgLjJ7Y)rT(s5uST{E)~K#0&>>rpL&rgL>JME~io5ILoUuDaO&O zn|n4oztoV$l9hh48q*rvtVAD&+{OHQ(EN66Hj|f=007kY5Fg##d{Zcqm8ViJ*8L>( zJ_H0)(3f;7MA-CNk>v)xurKCQV)b^L7>GMSIK>E9P^vW!ML{?;fYmlhI7R`Ch?gba z5LmE(J|4nX%+qEM)y`ug3+jkD<{vrUXdPRDROE+?5_ zOj;6rjv4YqCL^gK=Q4EU%y$S~5rpIGoM*0nj@iz~@gkd5>uvypJoYe~Z|<*MO&C5u 
zLVbqQ{h8q#rT1H}N6#!Bnz~fjt-bH&QaN+>=QUrqxvJa|oFr75&hx+QOQQ$4b2PTL zi*UxNA4nGRo#zKlLM@%23Fm4y6k}2oEI$g{{f|7vKVzx>&eO#LK=_q8os`G`;s?r{ zCfWWp-n0$NO5iFj;Yo3f#6tpDBPE4ubz%TT199h1Ibh^=yGXY^g-__k@AC)-+_Fys zxSA9SloVgHTI?K?5-=GScH)^whvO468eq7Df~)~{b-F!r3Q3!BJMJ%czjoNY94O|V zXK1pGa`9j=7)uu612mK@FflE7kBVc~p29Ab_Qg3W-zO2hHj2^^_64_&XQq280tXmwC%;{Kd%r;&y z{{7}mBAG=vZb0xzVtif0z^(u5^PB$h+lOHX*^-t_JqHL?V#E&TfH85(%9yY-U84+& z8#MAc5-cb*8nlNElj{}I{gxklm5L1ZQ@I&+ZdBL{dn^K6_m~2vtIdST`)w45rIOCro!kqxnRJ}o71Wn65(gx3eZuo^tNJq-ZW7iG>E zlfcCU{uw?k(_ z=e+KcGuy#pXa@SANQ@do1M{=X|dtzl>N3weX zKRN4t#9sFB^KJcFZ^Y;*zKjF)01bUU@Diuw7J7~98Z+b|z25Be+$=DBd z-v@EHeK!k0NLfwPimtYMSXZi3HSS&;4GK}l$eD`t!CECP1|Bh1nW2e{d08I=&8F)| z4LT0(A5kmaq%~E1s>rxnx|q~_9WQksx=FXWhyFZ^lm-!N+455`iOv&>4W7!ui^j^=bEkT2BF z`+3x{?ykQUy7@DG`H3Mu=J**thjEv+aRt|1iS<>{S%Pvy<67x=GHA3yQAO`a>&Va8 zI_u^7lZ(=vdJ}*5!6%`Q&!Gx?XOwit-Kf{rZ)w``EaOi_XapCWRa+P2;b~ zD?~C{ytgZFCNl<)UR)j}E(0i2jB4%~D}{Emx7%Dv#9oa`s(ZDtom6i*WC07&O{7e- z&L-=PFvpUKV=nH<55a)S@!D!xM0)LJnw+ZfI@58wq|>rUFZE2(=gz>(U;p>r1F+Ax z-~+D*kL_n0P^sx==pLcoXw7~@WuJ`%OFwQFs5}pJ=7)sI@^;}Iuz2f?aNnm z#Pw4Pbyt1I;^c+*3+?xM+Bp*pQ6|^va0v{+wEXBMsOwbE=i3WHWTz)yLI$G-rTWhA z?FHyGBa_9~k_x*qUN5Owrqwl>RtV;GTqXCe;dHR@r+O=x?S5sGMnH?=W3az3^1gud zBcEV6KJV*1-3HTj)L#eH>YtuJiWQHW5zIeOL!hROR#Ghoj6PUiG0dT6ss3~;nQ_C) zUw=4LAzm(I^{Gjdx6v=G+v*&or|ncI$QPLRxr|C$4zp8ao@`0EJDlxa57r)7PByB_ z<&k3H3tjZLHP9;;YXA#I?4WgGgPvHMiv!|3h+>A@taNj;q|QOV z>DD^aF&bDhYst>2QM>>`v*>ptOmzyMyE`2^JQ9yJgQ3bz`Jb84 z7j4`!yl$Xw&3G(YfT|2sJ*_h&I>dep9p2bsv&?n5^$u=FP-%GE6TDF~`$*6ATWZke zWK54WzT9A+GEtWMu@?N?mVc1G?y5oTF&)5Tm#BtPKjb>|Qb)pcpsIdJDb?inJx=F# z^(ypmIA7`@sB|4Rg)E5Zr`1Grm-kDp0L~+E>PD2ywi~#JcuPQ$vr!u+@%AP8(t{!) zHl@p_SG!X{Us(0A{H;1&f{X^e1{*!;h=2jMTS!GgK{hF+Jo`Ac7fUTMF#*Rw;Vt+! zOY%rz&B((5;+n5~#ac~rcd1-Zpgt{R*VSTNv|H_&-DayUCU2_7rPr$eNb8^MKZE~! 
zvY8mvV8eyEO3>RYr#saAjfgSlwVGU}Sd%X?DJK#!QBjmH5bvkSngPSsH#sY^dm9$O z{bg<7wyV8;sqKkrp(aq07j#(>*rTQN+<;Pr7O2a{Qj2`UccZ>M zpvP#3)}r`Jlp+Q(ERet(WT+S5{Q__d`x?6-LbJqoX;h>07+|DK{+I>#VofWfUYKgi5VlX(^LToo3Px99=Rg*4;WIt# zDS2!MC?|b(u)iXD{k6|>rOTnhe_a0!gSL+%}OVfF1& zF_ktRWMmvt1#&4mz|F9bVyNyI;bsr0M^Oeb5@#OB&f)fgognt!9cg4-*7@#@Cw|PUx*kh1N5+IyO%91ln^4*@KWMW4(4&w zoGjNT_GL$q1#?8DVhsy_*bf=@v3as&)6%A9pmJphN<9bz#jZ@8=*L>(AkHhtw&jfI zD-MJu^Oy&tGTFdQ&CUH$y&ub+zGj}?^NE}Ax7;r`5byI)XO{SwN0w7YK6F^i#2Wbt z4dc75T$9VQeu2g*ir|aeWoP!h#87gni(ZHhwJM{;WMMW=NSQ~+kwGW(E0o>X7INf9 zoVV}LAjwpq+Spmj)T|M?kX8Dvo#Liz?%_4m=hryP%#^xMs&>Iop$_4BW*2-Vb2FbhT`Gc zq(_?Lf+k~yl)rC|OK=Qx40>N?*O<2YB5!e|{6O5(Qz2~NI__iQSozL@!+wNzx!`jy z8KI=Vo(t>~nlaaaW!X)B+xfdu+xd9w<<7)8`t6BluSdr1ws9-18RX@g$614!Xxd(R zBb{bM@@WuR*^UZ8HS*#imbbxmDbv%eqC*QFNrL_E<$1MM{f&<>oG=mJrUVvz&`=$( zRf+{A-Xst5nxszZc<{6(!)G3#r}EZ7#|KC&2g6hUJ5^~pKMXMvtV__^D>R>3JQ@xl{Omy0IV(IXHd}R|*0mIi=;rq+jm!Z_9D0Fd z+Nis#W_uQl)#G2#0rI6#@2MQ=eI(7S1qWcXMy-SSsA_K{?^~fpomkhZ8o8z2wjrh# zMFij%C2xKam_h4|Fdr*SEIR^5#Ok1Dl_}0pOG~C0{>v8)ABRaUf#>n6AGX%eqAQiq zB_8k(V4#CJd5spScTseAyP4H7)2o)Zt9%@`^{;UrlIPKJjTHzgyO-R91iCu-R-=k)G_UPCup%o)6zS17A?dL<7zF44%#8| zF@H_hm}r4-=APIKIWEKU-v*?c?RyRTkSM4(Pc zW{pYtAA>dRZyUNjf;Gt6wi0eWfOS|^AOJ6kn%mWb1jy@I51Rtd{i+C;z6%H zy1s{8XGhGVK~)0$T>gSMzdr#)&9K$vPZPdcO+1R&%WdAFy@98@L+S3mhd;_R9ukBj z60m!z<0|nSag_e;*AL@t4ZuTU2H5HH6#nYz-}V4s@c-&g->uD-C!ejacG4r@7?}lX zWA_!1-X|o9WPF>4HX5%;>UAOfY0&7Ve!g+4mj64w0;vT6Q~_=I@0S2vehgqG=hnF0 zX!uX;7EF`eiO%$7nD{>dWUmWRz(xH(?ev+83UyXO3kA6rgKf4pcAJXsPcm&g!u|0T zH!$bAT4kM){`@s%1X;)OT`# zil4%!JH`G<8NP?C0xqB$1Rf3l1Ajk&A|Sh!kHHV_)8LB(@mNs-A0J^rJpJR7R_I8` z$Wij-(mnm^tfYeeq0wBF7E4D`-z_ z9tllv82mmQ5FtA?>2g{xK|o)x_?xro5)5?o10vOf3Gglf(u@k5xP`fvUiH;ONaUxT zI*9SD5yXC<*ygTk9imW#5(Q0a=7m+kJiA9*JR%AyYT#Fruknr5_82V=+X1Iup7~H| z6egX5&>rG^6rh>*_F(m{<`6#ab=`MN0Kf?^Cd+iSWgO^q$P;UO2!S6GIxb?PR#{kB z$O;2E)4hoAyR+&mosGs)kLzqdL}a>MGeK1fLub@#W{h#+$R*e^-hbGP?y$L;VA!0g zE5M{t=q|H8+ldfg>r-7BXeMjG&%x%X9htH)9xMvrzFeSyf!J)5 zqauxQz=-hUJ?iJz4>NlqlpKw8cU#qXC-!JLYT0fzX}&4kq{v5D&s!ic2y`|_vaXl+ z1eJPypB|>(LJ~whg5Ht|TUpy8?QBvN5Nr+qvdI8F>~W1QK zZZqlOO-W?rdJ0F>TXI;EW8|m4$&~?5&efxcH1TdfTPgcR1o(kDQF{#y$*7ioNCj3@ z`zg`qDd3DoR7sig5x9_%k;?q7{{NUc(J8VJTg5>1nWUBFrip&4~n_c z>81trKhW5Sij%I!^Y?uK32WZI#ft;ZG%#Qgk_7}8Kl2sEk28e%3w^ea>D;3-94i6Z z4mXK`CbNVo~)nqTA+CKcJ- zc0Qw&A!>=GR+d6}NwYDr4zQ^;4S>Zakf(N2!<(G~8?8i+wVt@%*oO3L|52laJiRCq zztzp0w0NN;y8r=%h$OxxDjg-Cg_zfR1z~aegH1%r?|;+$=t51*f2a>5gXXhFe*NM8 zkfBaS$|nLp9t7*``KSi(PNuNx%B-^0JbIz!In!d@9kw>+OGt;($^1W`;+da$F>a@k z$V9W4QyZhaOgNrZ3wrbw>YXAj%2%DTl)x6Gk;bEBaa}a(Viz6zdz%`YcW;Re?T_N% z_he+mhYw*u_FvUxRr2LSNl98-+B@^9zUCeVPXXEj&@vA*&6i%fF{#E*}>aXwO&6cMz=w7u*Pwpz<9L2NTc2k z>Vd~73Y<#lM@>g!)oUzzj|{~1L#TSWc`s?m6qk6wEIjn(OOY{4zu}Ma)l~(8Y(Qks z0j^mZdN#NHjC-q~+u_3=eGs9CwW9dh0P1Se29XRvk7+h&Z`YD7`$Mq1KG|@593p~7 zLxR|gx2kMFl|a$O5$EB=-StF+V+oKO{wmuF*!PSS=TA7{8im+zjg*^=QhDjlmS}5# zLvQQIaop$0f148r0@NO0-LneJVWNBa6INm5y@|_2#zHL~X}w?oo~~+3x=O|vTkat= zg>%ClE1;1?QZpQnnFgF#y@Z5&gVTP`&ZjsOLZ~$4Fyfu$_I9R8YpM&ee?M>~Js3rU zSYK}0nEw>DWeDmWmsvfop3%4}0YM85Oug|dat;ey-8;cW?%yK4rG4GY{$fcb0Z;)& z4PuyjbME@nw{l@?5OJf_~N*pYPKQns%O5ea$C;z@N#pl1r( zrOtPMEQ36J3p0}y+3|V5GM!hdnw*dQZ*q2%j8dKHMC^YwUmmR{BxvvLO#J?t6;kf` zo2LTYFI60-t3Oz03?v^`*CdlG@#Cf;UEZhc{NBDtSGbmmEU&5`Kgy0A{&x3H%raj= zLgpbETuFR@xa8}A*ijCM!c)nUBN{s3+YNvSV(PkPJbgm<0p_eU&!%y#It}43m3Tl7 z(NhA%>H<2Z$C<9@d!63F_KF32@u0_#+PXm5D^lQQQ=Yv39&Kh9>*;4*omx?1MmUNUgnn1mSD&i`IXZ~)BE<-cTuag z6Uya7hxNT7i;umbzfB9R)BGLrFpZshm|fRYZJRSXf|2itlq)6e&t_C7(;wXvEhYPv z0bk!gkt0|OlmYYZh`!SwJnTE?w-jVFRp0BsxqL@vUzTMNC2JLVpq={#| 
zX}z9U0}^^+2ZiK=2M^$Ae>RP-h)f#o)HPDs)`JknnXl<1YJk~`^=(3}S4PWrt($lw zvE7690_J5tr6&rwRnul$u(H}7oII);*V!@?^OE4)8f#C!J(itf|X zN+YbI5zE8*csS9|15o(cUNTahZ``HojlZN-%d43Xdr|OOHZl5j!#0&VbMs4!c7`~j zQx)}G0z<%Ie7lQX84^t3 z^5jAe3O-X!hR4{z%}2G+178Hg$!hv%JM0{L(;Pp(uBn#mJ8-^>+`hMtjDh(1sP-3- ziP#1wSM7NyXlPkUHJAYaa2a~X@KO&V)(sq$^q5@Uwhh>q=!AzAuX|0u(}>r0-L>y( z9_t$|(6LAyV+63wGenDGHHvgc9>%@yDX}OS{D{M~t&Zo){i~DKm}4ul=}9y_y!I|# zRHlyvpUtW@jHoG)PF-F$)Ogv#0~LU_MdvOh2pl7U&Z*)6zrZT~DRU+g{>o6-Dt&@S zwb>gGqd*Vk(KyF%BL(x{%jWf_tO?^7!}0e9zH(RCb~!}ojnioZxdDw9gq!DPqRjN* z%LgX3bDi$WJo(B89u1(MpVBn1GsKUo`d5x*6aX$i?_Qwofj*3P1!Wr~!wTsFe#}q> z#tQe@dgi0^`546bd64hD+GZ&12FinP7UxU`0A>fHv-ZuxaWE}r$B(2&$}BY&z(Ll8 zB{BsB!8U&7lxUS4KN*rP;!8#34wNC&eE4!4q^9-74m52^!4v95{?GCB4i<=$%5ytyo%o zL?>HVbyXg~!-QF3@+otV(Z*>3L-FL7Z;#)l$5t>C^D)t1=SUQlH8tqDY3u>VO+JJ< z(XV20`BP9{`MYb&5*UeD$+AgRb4HV_0MjGENYcq@Fqe7(qr2U{PgVa-b-O%{X#+EN zF0txIWqxAd-yuBoA1?s%kBY$J>F3ICzEpH1fzl6ge!3_R@q$hI)vB^SQ!T_3o%9ln zD2fI=_r`L(y{hWe1KOZ7C0W%H5fuG^V%g=&iJmVd)t!aFZGI98 z=RP7?Ow2)t-6`nV&ZJ~CxxT!=N)J6FRa;g!9q0*APXF4^2~=OmE-`rsEKy59*8OnQ1;vud5g4YjR9vNbuDvQL2NjCknwi(YKea(MqCH+F z_k4fah~suRJBi6C-rs$fMXY3X@g+L?hvHT6eW3xG>8-+eIc@BbDk!S4i@Hrf?~%y6 zuU&CFR@9JA7XI@OmXnt&qFbkK1L#v0bF-;6+7W85RQlomH!h3Gaz$6RX-8kS3JtVwlkDn#1ADdWl9zV;-iIq zu9=o;gA%w;gKfr(EjK=53$7N>U&FAqJ(j!Uci1TZ2t@giFY%4~}2aImOj zpE=6EjU5Ey!q%cL6pADeDntRHn{mW~#y6Ragfc*q;oy?8!C~E_XQDF$acZXRy7KCz zQ5qISTzn`2w_3ING@Z8FVV$a*)nE;%Bj@oSEi$pD-J1axs? z+3R@K^Vk4b<&t%`&tyXNDhP_Zm0Brk*MRxJaOjsO=Si&f_P>^~m`*$FHJe;UA$|h0 z9+#`w_BnTHZ<3EN@YkQcRI5Gvn(!ii4*c-vcKI=8;NkrIIom874bFQZ16^13`t)D+ zfoYyAJ>C3^sJjX8>cXA3bCkwiwa^w(>Iy$)u@Jr2VrnkJ`Pn6}QQYgH0py-UUXP(f z%b~6e-`J#wR!fB<%B-b)*?yQH)4!u|kg;fj4w13jZG1h10LphGHfmD*`%!h;CbEUf z##xJ8>QmbOFOiG}BmUc7y|zn2K^?CVkk^H5Xd%OJLVG(s0_B5!(yt*KJxw6>%{@9Z zFZFM|WlOFvRncoC>E~KfElS95auYi4<_Nqc0$k5M}e<(>0uh zWrZZH1hdSgyQv$XdxrFNo}tCs7lYXV16{Q#K<9_IgecpeCp<1n4ZRc%h(I+{3W!6V zf|W4ndz+Hki?}>Uo-1msZOOa11^F&+#Spj#LiGDOGM8w_*!toD0zBI$dWkHgSY}Ts z^gIO1StUR~!xYro@Wp5co`jev#7%)VoB2;sh0TJ%4D z$4OLJRmSb1R-{(-ZnD+I`MAI4MB@^eZ&!38+zfD9@^19596}eo(2<%vB#R~SC_#QO zY;9Py6Nk2UD=PWTSGk68;2IED@9{&VabO{Gg9=j@V2YTVfR(-^btze0eqTQE2 z%wJh_6&H6^la#T9uq^j@Pt3iOW?uCU)xuJTpbzx z_2*wNKoJV`GdS7RTm2`S@)LXy*1taZ*NgvW_h1XB^?U$94?xyWDI-SJ#;56I;`=Q!aZXONjr#ut-iW-|1U?maTd#;5G5(4A{O^B>ngJgaS9_nU zBpLoK|8rYH0`SiNX{QfIxfd?$&a;Q!d_$|-hf5sGTf+5Z-`Hns6*QVgoUApAVKWzQ zS7+vq?mW)Cz<#bXVd~fsS9{NHre11%z9^^Q7$sykIL`E;C&;({g%t+U-7k?b`=)Lj z|DSev|MNq@H9Q;V#Qo9>)%6y1}?zI_`@pe?>}_`@j~QLHH9DCLKHe^D;1o= zx@L6|5M6{S|qy!jF41jz#Z>6z~pIOY-8}yJ%d=_QY?F3G&NT)C^s^t$vG5L$ zi|{FZ!4qscq|c8L5uNC|X|1CHJ#;tDTJQanR9^9eA0`vIK>&f8R3jyHzwlZa)aILV z+*naD>Zke4E1fNwOwt5MiX+B91FOu;yWW9ZB%1qrSuWd*owC4l4>9bVr-?TXpqb+H z9@=666`93)QnS~+*jDb=!Du|1FIj62jKiYg@Mq9+mm;w!EvZ<1EYLLvP#bAm%ccbY z;#|*MlWX58aNtxe2dY~7rK)3CstN5MC1XrVeEUXQrxuONP4T)a`!9N$R}4>9Kyr)$ zz{xHSQ!>?K)aZ;~$u`A$50H6W#pKSf_%Y(@&ei$q&^0|>2Oa8ItbL_wR2w)XV@5p0 zwz=dSSGF+(QFFC1Z9sms=R ziUq|0#%wcAt|~P=r2+!XSZ9y=<}?2)j-Q)TRRyb4qo#62F3)nxfBg7S?lLOI4osvW zqvwL4&l^NKweh(}z(Lbgg;9+e;O<0k+|t0M5{E)OIm<4~t-goL`jsBUn6t0TMjY@Kjh_2NOaljRt23`ul zYC--B&rAgO*=LZ1Nugi{*52QetT_js%h}}IVJtg(R5Ek`fAO(u$PtYa;GL5e1tB9u zuQSx}I+9rMM9)V4?!3jZHSWB!wJJOS6+aq85mJDWLzU)!mPkb~$+g>aY8`p+bqbt| z-9+Dv1 zi>6fUXACM7)t2+w&uXI=dur7gJnvc%9%^b}P6L^B&(OTbbFP>OgMKnx*5U-L{Cm7P zD4%k+y=`ts6K+EdXn+es_M%^>V+5Kj2hYOZ*21%Gg!v6h=r(f5-CPf7aq|Oa=Hm>7JaA+R(x<>Bd z1y0?;4{;l#c{;}c#$aa5Y0hg$@r|(U=*HcG{0)})@%-Hw8+?k>tI~ zkw?->k(geTp#s77@Wg7)W4-B>V=dyNM`AH#lFuqWD87^~SPDV?%Fp{Wa(&t{cBw<$ zcRA=BAyt~ol%Jp9guijz8Kd}M7RxDqy)U(BK7L0km^L;8;Qg>O^JzGT+PFvCxz 
z&UOEGkDw@2KIe|MH=d3`j61x^YSGPjHv3B~^?7Cvl~ccq9UhnkQ?i7)`9^~&4Eve; zkIxeJxOrZ>Z4;~4m@&8=?3tXqJ%%T88cjCBAXqjjz63;25#q|8whT`F1GH@UuOJ&1 zfpLB75_$YRnlxk;^tpu7A=f6#P6=bbFz0`MNvkyTigXAka9;MwG_woGH~?i8j0p=$ z0zOC!*Umjb`hLZ0wcyw(H~m%T4W&kI;0$uJ-W(RSGEG972Of=+)-aAa^vy12fHE@T zm}VvCA#P*FZr|CJMS3Kk1oDNiE<8TG{Xq{eEr_r7D4q!7|qYJ?fVAz~(sOgIE&?k2&TCTU> zmKfY~-EX1>&+>3HM~etS#3725qjPM*48jO+T)P$DgVEGGqF8>9F>_uzxaZaL%3im7hpjSx}vJ7#3RyTdNR1Uk*OGvlih zCX;8|)VM5#F@XFtiMFqJjQj+XS!}lo`ewD~U?1P!$NmTaRndD1u;>gu*1C($?Qrwq zL}W$fQ{3Zh_FbFPl6g|eG(ysY(}>lvtGIl&xeBVQoZj5{QAQj485!u@6&6%3t5{8} zi(}VP`_oeLP995isgo-7b4j4DM*W9*8V{KKtE`hL-$fe({m7=0Q2tjNw;XaVg!Oouef59 zs;#rxFKffb^%lFYC#&5HG-RY{D5@RUK}UDldM%f_3N2~b3S_|U2?xY^g_7`t;H`AQ zDZVkIX2tF5waX4E07%1}QZT7hM6rJJT~jt%5bP5vujT5~RoUHLZ4Z`9b3P3=H6kcX z`}Y1facN)6@Gfl=nvN13EEa0B4*Mo%!2$>^&%IH(3w+S`-`Tx%8@d(j;>5Ptu8k-? zE7qvf0Nodi=K{*^KN?%X%AZ&`53uFCOXPh2lIAmcf2uP}3iG6g)QC~v25&!SVsO!y z(DQzJMZc4aGQFR5w#v)+I-}@w%3!&S;yrZ?3!lG)!EqzGo~f^KzGzIeR%mvoj`Yui7iDk{?jyaAL#dt(A8j)a72J#&;kjz zvX(k_i?xc{rYlo!r)B_~`MU4Y8+;6=L~0VPQCFZ6TiAl3Re%I50i+U1*MmiJt7(+= zXI|INlEJRiec?5DM}&56uw5B)0k0#zSNZYIV;tvwl_}HN$G$>x!JWP2&Y{j1 zkd5ya{V}Z3z#u$RhmQYJhjKog>5UZOqM-T&+iwLWT4(*4lB%r?Ur}VMsxlcZRgvd! z;yV{>{*=TIgq+QJVzlPEV}C$EK!&@qe<07de8Vg8`giCKEI zwq?$jxB7{#ZbwlL+F|)*b%I*(Tc6sXC@XOGSCCkhSvWnhC1h8t604_HPi>$me0_9O zfYrDnkHaeX17X$<+{*XJS14x(tPFpuj|M*oeHBK%0^PegVMl;$~a^)=O3s|ATx3Bv=gY>@cX_uuZQRxRB@ z#;~1VuU5tyWWh(A=F7sVve8aBer>;eA?ffKQZE(`imIbWk4%7Y{>AaU6BCEB2%8fF zhdmgsa(2?*5gpTeU`0Pw8#A$+OdgjKdldSdu39Z6&5Egfahz+qa!j-5fLXnCeiq$x z!h&1F3SqJaXHyP4LPQXsFtI1T8OLe~j#B0om(}X$*3+2<<0Wen^-xk_YNX>pO}yez z`#V1>r|3)gxi-YoNa)2bYZ|VFYB1gVeEXQSp5haMT?^k7_x1C|y~24tSLpVR6Aet) zyY-bp!Q@m8Ro^<vK02(I>l0b}>l5I1Z`&zu>c0RF?;6Pw!usR-O9~@-UjjwZ+~}6h!BC!llNez+D;r zvLBZO{l(Gx=1!0MWUa`^okyvdrM^&~n=h`tdS^*KzM&dskT}FXXoT z$qR3@`RgML%B^3=rvj_I)dG)S6!Cprb1VBM9nDplHLn0q@kay$1d&Tmk=dtGKKO=hcpY6Ap@4ZZ?h0gNL63ypSFDbtx*r~WOJ9LxOWMV#^> z!+W|iK_zsP&y6CGovEQN2w%P`I}_dm|N6oxU1JPg1bI9TirF>+M|NZkRA=b zz0j^eRRk&qF<(4ceTO%J()2F6k}uKX9?><}OPzoFsdeL#M58`EnY7(wK~xXPYL5d< zb194nFMd83c+OWGB)Yu2c{o{wjWm+0A{pwO%5Uv8SA^-O}Y@5jM$-_RShG zk!W5`cxuv+()D|&)0IH>&kjp;_01Lkg4f)5PCKr&EE>U^aKZSQU4qu_H_viWX6&vtN!8H+-#NB@ z2y1Ah|2iQftof>zR~%?4jenMB_I!D~N21OPdD1U5lBj@QJz~#<{n<7Hb5-FGMj>eV zP1-&VIFFq6_6HnNA=o^5OJXRvqLNZmmMFdi-rn-9>4$8eeNckCx1JJ{M=|cmv=tF( zH!Y6_0bYe1s&h%x&M}zQ4h5D057v`zoT-s;;cE>V`6R#_Yuf&aG&7y%-IA)=1F(q3 zz{T~IpGED($`9ZLBU@*U)*5WS+DOh-*3Fj0%sx~|7ZO`b?kKKR@Yf~8rIDipYU+3} zNlDuAL1#s@ov^RDUi~scE|_+y0a}v78bKw?9`T)t5_Dnh?axgqm}8+69@R#@Pw;Nk zMM&369W{lw^JOe#WGm(N z-h#|E&JgA)0%jczqf}v5hY@{*HvtOHxNQ<7&Gt7nflqPq&P}E6FGf+RJGorKakrTOR#I=j!*jL`e(m*dP?Nb!`+m4=fq!hF@CuHlBNm%GDl8%CgYVJu-H%%lI*lOCsq{45b=CqD2j@cB(b=& zZiSX$r(PGe?bc zkat_kk%x>#5oD!G1mgv^Dp(22*)6G3;ZZ9T9~_leqE<8Es8HBW zqAY5L4SGecatV#0ZOIa$=;w&K?2mT=zL%b8sPkApQsai9hM`9*l51RrSl_`k;%io{ z6dtVxeCvIaCAoxRnXXI>-M)r6BQD~8N8B@MT>Ljp2~6}QQ@SviJQ!@h9-~8&6HxFW zRPDZo)^(?Z4DUzj!nWFdlgnv+2xf%H5ZE+?u8eutJiHG49qjC^DFxgjG~ssWNw#<% z1DiT0dgLwgu=^zDI0ZYL#3PwYU}9S@)w2qN-{B(p;2Qh{divxY1vy;T=HAVLa5zC* zeeEUY`e6K0eSW6F7X?CyqjoIA+f`V>?LurtS*MdfV|rZ13dYsl#S&R$rJK+Z8Mrrm zV>DExC6O&C2o!l=$yn#R&Yw+T#NmLlu#3{er`PfP8XjZzfxm`Rd+UE11mW1lEw2Tp z7U!0__!Dp%+-Er&t;iWQh)e1VXDijmP2~111UUAcLHjd0;?cC%@E9uBQf;Z4Eh)IsXBHDKV~{{gD~2aCLD{PS-Mx^?)7Ay{$tFp zviUiPT3GqbDPD^tqP<#XOdlHYzX>W%X>HPzETkcxb)S1b+)dv$#Q8yu#D7$pps%%% zDLGGA&&zR|fCe9`>2FN-w2rj2oUY^pS)7uoFt#ba)AQ^VKa76t2?L<0 zRncJ2ZtRJ9-hf&x%BpIe`PJ~d0$rXxb=Y@RmQZO?>ad=4RkNzgEmoz-3m#A- z(2%sV!gxicth`6{UgBalMp@ygS}E>bQ^r%hqN#ZP?2o5zO+j2#c2`-A9FW+Z@!rJm 
zeVHU^IFVG0YE#DQE$-8fUn_~>e{am1nC8E*Q?ow8oGoF^Q}Ud5fme8Uv5-A$fzaKd zf)ch>$=}YCueraGWnZ_@V%vLevysy0f%Zm#M)ZAz^7K9hdX8(-lV(3~TNX+(hlP`K z<^FJ=ZTm25P^3A{lv`g$>#(|F6UYid77rYp8x;aYGeIJvN9Do9ZbLFbxoW!2oABA$FB=<1#P54v6 z0S*k9vzOqH#!P|AX3^ImB8xE8#F4zV`qRRCh8fwI%9o#+I7X)A9|NLd6hokZ3V0XrLszZh( zt3Rdm>t#D#uNwJl|1%Eh8V^U#$00T%Q)Dy>7qeYBVJjbZdtJH18OQD|=_PfT@U>*& z+04!oj0YJ`^7CSyFJWNff>Jrskz_%nVU>BRR*Y||>hJhjJox6~&a629oQI3b$5*1P zQm8c&75^u%lc`1RI);LC9ultxl2TsA|dkV zKJK9m%xO~H;W8Cmlz%&4?RNCyY`0LK52Kj8&3UZjYGY1MgCEmSc9|hei3XN|hZxl_ zFUQQsh`neKUqgahtF&`|k1QxaBV850dD}^=1j6AG1);gWmj{xkd7ZJ>VA)Jx6K=L- ztgC6WpbBvpUhpg9Kiy|ICyPN4q$%KA*w!btYrz27!KDiAOGBrdW-q5$<+`KM-ltt3 z-uY7{t+{MIe|Q_BdbDhZN9r5jX04Eb9yIL?zCZT`F5+-D5dZ~*WYW%DcgpH_1x4l} z3uN78!!U!hM(cqEHPvD{V?WM?DXO+$ykI#u^l`MVzL#V;kV(1&%mqZh?D0h6f9ewQVjD*IsYQnm|=263- zc~+k5M)>??zyq_l;*)omXqJW~6GrE|nw83=yY|o&R<9(Qc>ZsMMI>V-9b!!0&^ZtG zVe8}CG8z48im39p7TMJ_1$Qyu?pL`{8!|>J=BOK&TBM|7XGCO#G%Af8AuW3SF`MYTeW4i}9tb zNZ(5&ZZ~QEP>mjVv33D$*+ilERjr8 z7C!sS1U-FK=Z+lx6OFZYkH!bJ47$6U1D&gVSed*Ob5EQEE$w_1w-qiY zhmLOSD>8I7k|y@o@70}=91`D(e6E-E{P0?hKTV9RY(82ssT0Y9O&o*Ys5{3{a8-qM zn3=CRqq@_d2#uvD?OatlivkO~NQ^vg_xg;~5_yi9JY*9(eieA|y2)zv=(X#2^R*Sj z5s`V(l8Wh?s%Gy8JGnTcTeZXFt9OA1Z;+)C?!3CbIgR4I`|AF6C51*YNq_&PXx?FI z7pzGs^EDDlEB(AM3g1oUr@06CcaM{vNEJJA%SGlnIo)->RDvn!jyHrp&l*K__ycGO ztSF{cAY<`IBjPe*k*bqqwNMnlAY-JwKyhpCCQNh#L1x8bJ#v$ejU#$e;y4p0u-U&D zcmXjk{ZJAoJLIv{;bgAVYHcKsHq|duUk!*gJuh8h>>@Yc$WY;VvO%=S8dUyMf zE7NBPysDMIL&c7ncFm}}D+e66ZxBacgNR(!_%(V{bL5>6{Q`-Uuevkhw2JCAo?*m7 z-wPPo$c$3dH86CLLKS33H3suK)hB)sPp?9tZe#Y{<$#aVsL0^6#MzVY56k9Pr`!3t zpSyMH`#t@l3^oKQLDZDrd(a z-{bda3RG*@Quu;TU-&d4`Kbv&k#w6vJh=fvkwA*J&qB4-;eoD2_R)xOJad`V4J4~Y zyQ4$Ho>L=yiLND-H8P=^*3gk+AFmGOvH(2F_3AR zUZ#WAa}SPRm3}+?&^^Bvw9=bxK)=q`F3C+zLvU;@_`C00t}_r~lb~lsW;T{3VMY5giJ%)TdB=;^duy1ZdI09eM{3f=~}M9`7n>2+;MkD@6eOB&`O z@}@^D<4I8gU!Y>Qr>o$65f1lmo(xCSYNLqL`K8*8ET@)W~)<@)&-VPm>m zVRya}Sa4z$#;OqIu73BbXG@5sn3yJXj&XtS#a()C&?uzsHG3{D-vsTPJWak}Z#?V1 z6Hnm~XlU8ri-<=S9LMBGSZS-#hsqRt7xYyrR+R|?ELod60N9Vz(OKAs=Ty+Ii8k>#XXQ2PZYvbdDfx3 z{SgsUovya5d1{ib>QdtceH1K!k%UM|AhX`X!(HsRWcJT{hk(jYtTsohQmK^R+O#|+ z)&}%VYPW+fwNJw|S)M~&oJ;UXNS86P zb!#jD_g~U4k!qAtLsBLk#{bp=0D8Jv>h~&|SAX@tE0ha(3mP`xGdI5{{=sWSwfiBv3rv23 zLjX!u#YK-Drf`$2N(10|WiuIg{|~+6rBS#b4QTO2!y_H@Pj$yXrMvjxbykD1we&$H zM}go2^W09^Nt@4IZWu^E3RBbdW7dt@H;m!u;i^r1bSX+&a$(q4nMRo}6*_yzNq@Nfk)n(P_Ck-yybvz@i6bQjD7P9y_ARC|IeV;`#HzSAj&*hfF3cDqt8WE=0$CxC2P9uUl zJZh_-M^kie&+DVrN@nY|_o{~%?m;ogC59ew z_fB`FwE=X`zxRX(4feh52fy1}w}U6~hB(ZPMd>x2X$vr;U3h>vBIo$L+{AFWA6wg3 ztL+kG?(ut7VsDF&Oc|$&Da)$bH7s9Fe6IM;h))pY{ZPqAada$XJKpyVh}Q~spov$q zGPHK~4L?|v3E`wsSKhsxPbB?IzWpo=_wZ@=6#0X3f0PWM5luQm`>g}c9Cv>@{HQQO z?gzLcqj-S)Ec#+(0iuq4&->CNgdfOq;+7QHpyy;DX&Oc)pY$HcTWP>?blNpae=6kp zd3Oe;j;(%kHof!~%+YzcK(FPk>2M0Yk|<~?Qvq*3Km;5o?EUf4hJt9APLlmCpAP^m zX+2v#Ru2QVBL!d#|5X7eY&J~xt`1=4`G(DgzA2{b&DPqy>y@6T29Q63w(p68gLGy<+8be(w6tUHNMn@ zByd)JpPTN;XI7>s2$06KCihL=XBRFyGV%1U?3#|^sAK@7myA#$O@LnW^kQ4u{Yjl) z=_mA)@x#iKQklfTjBZD%OeCk%o@1S7Yh_J%v^GkGQ_Q z9r9~ky>W{_yO?+QXsvg>N{_Or<~-%}_EOA+_DMuu5?3_nPc@jy{jSFZTSA17j7AFAV&vmavaZtkN4JiN&HoE^y!_A9*ac~yqfh3yt)!QdNz zeRj~CY(EcDqy{^XPR{OwTZ}%$spJgufYD&X``{lA0Cu$paI9s0eEqA(_jP-1VUUBU zbZ?Ruz*iat0%Qliuz;a@su>97XX+iA1#&M4TBUWg0X&*#kz46;p2C&3*jrQckDs-# zlTl*08=I-Gju%b}+pna;E*G4!@n5NlHiE`HA|m3J-3yIH=!K0t(9(u2VQ#Z~Oi=!S zjcn$IMczG8Mmf{UK2XwdIU+r{xKamZW|Ce`z=cdC}u`V1~z%^GsB z`0~xRt2zCE@e6~Hw>>3#cLvq~rz zGniJ6_r_bJ?gMEtFf~SINqk8PcvAb)XY69h+ipG-46@cSWpD_O2R^~w)i@-N(z&W3 zkjnrG)<+ZkU9)trsR=kVRucI%Mi>~)+10kefH70FmJk?aj!I&)xcUg4m`zG+F;r%N_{f|A!(UF)(fuw*vaH%A&5O20# 
zF@`5HhDlp3_LC~jKCf5+1`S1}Ozgm)J8b}kfnKcy9@HrBu@FsqobJ^v$p8<0PAP*# z5KEWCbcuFZD#C}vH3|?pK#&@3O1dv*?xU1g;4s*F$Mt^E>%u!1V}B^(xiHBWf$hf< z?f_vjWs>|G^lnXnbbbT!$RrsTiYV$U_6vk`6bdXV)jd#}%uO*}^WW^ABdv z^m-$+MG)DQ^grJIR=*=(h?^?u;9m7(M*e-^>7y)@k&i*bp%ho>J1rW^(^q4qZu0X$ zY*OVm$9QxTo?PRy3Nhq$*h9~b?l)+7_?ckjBwg5)tK3+{c!LvBHlqyyJuRW%<;cWZ zj;W09^?upEm1Tj%d6HZ|;$EylEb?XnHK2P`=sG}B#vAtjJ8b)WCp zU6XcmLiPnz>huInAS^16Wwg^5(=7wFX$v%~YFq5owhp#rpI+WQ^NRO(iCLIfqKoNq zRE@V-=DdP77e!?b;M@k_e!mL-yV$wLYNk`4Fy`JV3{9|$|eEWsolybl&2fItfT z=|fNww8PczFI~rSZFXpLjKRA=eYe)9EG&S$HPOyLUE`D&c;6z12#P<}=uR_KDx=Zz` zSNOwKt^Z{^*ElpEj;*?yfxkE3FsQKwe$YtvZV*k*x`{r#zELgFlF$Pz0KD;E3Kjm; z_R$O)-0ux+M>AU1bO6Gcm2m8MJ)L7GBi?=Iaj2`D|p5tIl(JP(1d$9qVPY_+k9}$GWQ25#Y{0 zEuvFvV?%od0hda3`_#Z6Sa9w$i+J5AD)|>6K!^+T43w9p%kzcHdjqhqwK zFTK0LO6ZwDrfjk!38q6pKOD_$CC%Y>*)IP^*LOHyXGbZ8a9YbIQOSrBa|Vikr2aSH zqGH<&(B|U919_U3jEZ!~MJIs5>!guT5eBY~<*`TW1S-XDsN)S)?&CBCnnksmfy2q4 zfHQHk%#;u7N0td`O$IeJH5TCbIE-A&5nypH51b$`wkAC%NUb_`&}^j}n*#>Dq92-rWQF3vDr~29ABsCAEkYDp%CC_>!yeOBTGX6m z*e;Y`$kpQSJ$m$HN?QEQdzNh`Kyz|S}ekRYqPGBDCXP)RovMf#~8$-Fu7OGf;Z|LH{l-oti&L-R%t@{Tq$Lr-; zsFtol_NyS58pP5*Y`UV-5THf*{WXirc}vM<$tq*2TR=sz1UC&4Q;U#*Q(8;WwGOPt>x3#)!Ol!~BsUrQC`orn+rWW*~j>nvykB z`(4$rUCG*MJ5FCLfB5d)L%LW+=zgMpWO81pGfZTri&Dpvr#^%b8-t zf8oWKgg(9WGfu*0VaYIT=i4^;Bq6T=x^#Hv;ZnC(w^so}Z94u&2>v zc^MnyGEUHUjwt-K%czwo7W$?4immQky9ehxLS01KHAoL&jE( z^^EBK)#eAAdT*QaS8B!i%Ci|LjnN?BAgNvmGFQG2+&Mi17}Vrbx^%&%aWwU0Wgl7> ze`B9ts>&LC)^-q&Vo=p+Vj8$ys`)NajTJKuGUyB8*_Y^QmSfgSkFA6ikAJNkAk+p1 zeC8`%P-!W-<3S9NSRIqYA{Wq~7tDs7L)VzW1Ye2YbsqX617ZC zLXjWFSTS9pk~84z(H@1dv+nG|^ihRf3Ff3-dtRq#Rft;$(lKUAjheMq72s-WyPcEN zbj@%~;o*dL$beykU5?xGmd!Z6amjYBw!2;`nLzmxtF-Igq*jQrew;>cUEIrLHITot zcFV~q$=k-s&*!^T=OR>(+G#VpaPWPc)d5T0GK#(F!9HcC%G46s0@tYFyc%ozuj;Re z=Zj@_LTv~)k{V(cRl5m+uZBn;NWK1!|M0w}j}*}%JE*L(lIEoo+q{|ggI7ZT`Hp3* zcxom-s3)jx45z+Q6g-O=H)+%QQojHqB$Lm~>1-COXX>##YkuIK9&U~@0wQ&U97vz@ zRM?;D)55GEfkaNCdXBo61JZNFYC^>b_@tL}v2#X|kJ4VzrR&0)tmk*r)dr27OT>Zv za9Ugo@%N9hG5g^^_O490U7_Q>^=DAs4-*WvzwG4Zpv3gzR`7 z*&4386r-G4MU0SBbuT6 z2hU8uMt&l0>91L^g_h741jo|EGwN6Ag)qA8mR8(~J8+LE5(^M`%Xp@iG4N_jfq zon$z$3Rub?i1uh*J_pJ;Ga3uxw3UI_q6D6ie2SM0>kNg*=bB}TUss8XnX4CI-qxa| z%Y=)2LlC)+d@yTC+?`};xeNj9gI5fJoF=n1I~fOaYw|YcI^E&;@QXW@p|Rl<0X&5cVDE2J_q zPYlwr7bK?$O$^4PO-x{dE6z(n z@C}?SyY?j|^aYFF7i2wQt?t24^v(R}y8k(chA}w7<*U(lZOHSK?dh_-xHc&3$ds$X zLAuDzc`Ap`ds!pZZfyB|L=ypNcX@mWj( z`(DxardYYYS)~?Fi9Nc8&9D$ipPiUg!js%@NmQ7&Mf-?Td>X$GcNLx9`vkBOUF((0w?NpJzk_O0;lJut0s&`Xg&xOOpSC3 zbxVd+IgW_#1yJ|V*=zwQs)%NT*nq@U((m!Zuj*H;#S5rBl2mCQ1K$q#TZ#QdNrRhb zQS`+a+eaMHC{nYLr=H0ROdZ>> zU`a(#9r(IFf$tIYFqyC1X1G8>Lhc$ldWk0Ofv}ZE4}X|&Et9*el}SHz;LA#CDC=4y0<&hpFAq0QKMwEQ-=EY~Up2U% z?!=Fj33=W+AMpscc$1cveH2ddc-UWM1qck0Pyg7y`P{ZS?(lSZNdD0azr7WOYi=KH zg8avG+AdW~h$z_qeAx~*59d#zIoOLbzZtc^mi_Zdk-AU%bAkxhc>nsmKhKIti;yT31_ zdiFE!694b}?Emh_f1eru3p=vBfims~3Oc?WSMnX*tDiAQ=RMeeJ1F+@KGW0IuEp^p zP`RFmzwPq2luz#>H$OKi{OwFD0_T9bpdeS+-%hIyaJKM1FZ=uZ3&9Dfsu&*n^50ty o9{BLWwYw_x?fh*k$}N2!DBA Date: Thu, 16 Oct 2025 14:59:08 -0700 Subject: [PATCH 28/54] Updated the image position on the README.md --- llm/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/llm/README.md b/llm/README.md index 5ad31df..063deb1 100644 --- a/llm/README.md +++ b/llm/README.md @@ -48,11 +48,11 @@ pip install torch torchvision ``` nvidia-smi ``` - Look for the line "CUDA Version" as shown in the image: - + Look for the line "CUDA Version" as shown in the image: \ + - With the correct version install PyTorch from [PyTorch](https://pytorch.org/get-started/locally/) by selecting the right correct 
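The README section touched by this patch documents matching the installed PyTorch wheel to the CUDA runtime reported by `nvidia-smi`. A quick sanity check of that step is sketched below; it is not part of the patch series and assumes `torch` is already installed in the active environment.

```python
# Sketch: report which CUDA version the installed PyTorch wheel was built
# against and whether a GPU is visible, for comparison with `nvidia-smi`.
import torch

print("PyTorch:", torch.__version__)
print("Built for CUDA:", torch.version.cuda)      # None on CPU-only builds
print("GPU visible:", torch.cuda.is_available())  # True if a CUDA device is usable
```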
From 5f3b61cdbbe935fca5f1763fbccd3a12cff959bc Mon Sep 17 00:00:00 2001
From: Hokeun Kim
Date: Thu, 16 Oct 2025 15:06:29 -0700
Subject: [PATCH 29/54] Revise README for LLM Demo overview and structure

Updated the README to provide a clearer overview and directory structure.
---
 llm/README.md | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/llm/README.md b/llm/README.md
index 063deb1..9518b2f 100644
--- a/llm/README.md
+++ b/llm/README.md
@@ -1,8 +1,11 @@
-# LLM Demo
-[Federated execution](src/federated_execution/) - For federated execution of this demo.
-# Overview
+
+# LLM Demo Overview
 This is a quiz-style game between two LLM agents. For each user question typed at the keyboard, both agents answer in parallel. The Judge announces whichever answer arrives first (or a timeout if neither responds within 60 sec), and prints per-question elapsed logical and physical times.
 
+# Directory Structure
+- [federated](src/federated/) - Directory for federated versions of LLM demos.
+- [agents](src/agents/) - Directory for Python files for various LLM agents.
+
 # Pre-requisites
 You need Python installed, as llm.py is written in Python.

From 66da8ce4b8270eb29597f9eee49f83b574f65b15 Mon Sep 17 00:00:00 2001
From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com>
Date: Thu, 16 Oct 2025 15:21:22 -0700
Subject: [PATCH 30/54] corrected the spelling of environment README.md

---
 llm/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llm/README.md b/llm/README.md
index 9518b2f..15f7d4b 100644
--- a/llm/README.md
+++ b/llm/README.md
@@ -20,12 +20,12 @@ To create the a virtual environment follow the steps below.
 ### Step 1: Creating environment
 Replace this <> with the environment name
 ```
 python3 -m venv 
-source /bin/activate
+source /bin/activate
 ```
 or
 ```
-conda create -n 
-conda activate
+conda create -n 
+conda activate
 ```
 ### Step 2: Installing the required packages
 Check if pip is installed:

From 67cf0bf790eb26e90e344112e8ba350d20296567 Mon Sep 17 00:00:00 2001
From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com>
Date: Thu, 16 Oct 2025 15:21:59 -0700
Subject: [PATCH 31/54] corrected the spelling README.md

---
 llm/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llm/README.md b/llm/README.md
index 15f7d4b..39fc3f0 100644
--- a/llm/README.md
+++ b/llm/README.md
@@ -19,7 +19,7 @@ To create the a virtual environment follow the steps below.
 ### Step 1: Creating environment
 Replace this <> with the environment name
 ```
-python3 -m venv 
+python3 -m venv 
 source /bin/activate
 ```
 or
### Step 1: Creating environment Replace this <> with the environment name ``` -python3 -m venv +python3 -m venv source /bin/activate ``` or From 18a8548eb07770b349d23ed9b7178784671d36dd Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 16 Oct 2025 18:21:17 -0700 Subject: [PATCH 32/54] Changed the comments and removed the Hugging face token and it will be taken from the CLI login --- llm/src/agents/llm.py | 13 ++-- llm/src/agents/llm_a.py | 37 +++++----- llm/src/agents/llm_b.py | 43 ++++++------ llm/src/agents/llm_b_jetson.py | 102 +++++++++++++--------------- llm/src/agents/llm_b_m2.py | 36 ++++------ llm/src/llm_base_class.lf | 119 ++++++++++++++------------------- llm/src/llm_quiz_game.lf | 13 ++-- 7 files changed, 162 insertions(+), 201 deletions(-) diff --git a/llm/src/agents/llm.py b/llm/src/agents/llm.py index 93322f1..1d88658 100644 --- a/llm/src/agents/llm.py +++ b/llm/src/agents/llm.py @@ -4,8 +4,6 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig from torch import cuda, bfloat16 -### Add Your hugging face token here -hf_auth = "Add your token here" ### Model to be chosen to act as an agent model_id = "meta-llama/Llama-2-7b-chat-hf" @@ -31,8 +29,8 @@ bnb_config = None ### calling pre-trained tokenizer -tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) -tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, token=hf_auth, use_fast=True) +tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True) +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, use_fast=True) for tok in (tokenizer, tokenizer_2): if tok.pad_token_id is None: tok.pad_token = tok.eos_token @@ -40,19 +38,18 @@ ### since both the models have same device map and using 4bit quantization for both common = dict( device_map="auto" if has_cuda else None, - dtype=dtype, + torch_dtype=dtype, # Changed from dtype=dtype (correct arg name) low_cpu_mem_usage=True, ) if bnb_config is not None: common["quantization_config"] = bnb_config ### calling pre-trained model -model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_auth, **common) -model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, token=hf_auth, **common) +model = AutoModelForCausalLM.from_pretrained(model_id, **common) +model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, **common) model.eval(); model_2.eval() - ### arguments for both the models GEN_A = dict(max_new_tokens=24, do_sample=False, temperature=0.1, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id) diff --git a/llm/src/agents/llm_a.py b/llm/src/agents/llm_a.py index 15411cd..0e888bc 100644 --- a/llm/src/agents/llm_a.py +++ b/llm/src/agents/llm_a.py @@ -3,23 +3,20 @@ import torch from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig -# <<< put your token here >>> -hf_auth = "add token here " - -# Model +#Model model_id = "meta-llama/Llama-2-7b-chat-hf" -# Require GPU + has_cuda = torch.cuda.is_available() if not has_cuda: raise RuntimeError("CUDA GPU required for this configuration.") dtype = torch.bfloat16 if has_cuda else torch.float32 -# 4-bit quantization +#4-bit quantization bnb_config = None if has_cuda: try: - import bitsandbytes as bnb + import bitsandbytes as bnb bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", @@ -29,31 +26,35 @@ except Exception: bnb_config = None -# Tokenizer -tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_auth, use_fast=True) +#Tokenizer and the token is automatically used if logged 
in via CLI +tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True) if tokenizer.pad_token_id is None: tokenizer.pad_token = tokenizer.eos_token -# Shared kwargs + common = dict( device_map="auto" if has_cuda else None, - dtype=dtype, + torch_dtype=dtype, low_cpu_mem_usage=True, ) + if bnb_config is not None: common["quantization_config"] = bnb_config -# Model -model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_auth, **common) +#model +model = AutoModelForCausalLM.from_pretrained(model_id, **common) model.eval() -# Generation args +#Generation GEN_A = dict( - max_new_tokens=24, do_sample=False, temperature=0.1, - eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id + max_new_tokens=24, + do_sample=False, + temperature=0.1, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id ) -# One-line postprocess +#post-processing def postprocess(text: str) -> str: t = text.strip() for sep in ["\n", ". ", " "]: @@ -63,7 +64,7 @@ def postprocess(text: str) -> str: break return t.strip().strip(":").strip() -# Agent 1 +#Agent 1 def agent1(q: str) -> str: prompt = f"You are a concise Q&A assistant.\n\n{q}\n" inputs = tokenizer(prompt, return_tensors="pt") diff --git a/llm/src/agents/llm_b.py b/llm/src/agents/llm_b.py index 6acb7d9..621d43d 100644 --- a/llm/src/agents/llm_b.py +++ b/llm/src/agents/llm_b.py @@ -1,26 +1,22 @@ - -# llm_b.py +# llm_b.py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig -# <<< put your token here >>> -hf_auth = "add token here" - -# Model +#Model model_id_2 = "meta-llama/Llama-2-70b-chat-hf" -# Require GPU +#Requires the GPU for this model has_cuda = torch.cuda.is_available() if not has_cuda: raise RuntimeError("CUDA GPU required for this configuration.") dtype = torch.bfloat16 if has_cuda else torch.float32 -# 4-bit quantization +#4-bit quantization bnb_config = None if has_cuda: try: - import bitsandbytes as bnb + import bitsandbytes as bnb bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", @@ -30,31 +26,35 @@ except Exception: bnb_config = None -# Tokenizer -tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, token=hf_auth, use_fast=True) +#Tokenizer and the token automatically used if logged in via CLI +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, use_fast=True) if tokenizer_2.pad_token_id is None: tokenizer_2.pad_token = tokenizer_2.eos_token -# Shared kwargs + common = dict( device_map="auto" if has_cuda else None, - dtype=dtype, + torch_dtype=dtype, low_cpu_mem_usage=True, ) + if bnb_config is not None: common["quantization_config"] = bnb_config -# Model -model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, token=hf_auth, **common) +#Model +model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, **common) model_2.eval() -# Generation args +#Generation GEN_B = dict( - max_new_tokens=24, do_sample=False, temperature=0.1, - eos_token_id=tokenizer_2.eos_token_id, pad_token_id=tokenizer_2.pad_token_id + max_new_tokens=24, + do_sample=False, + temperature=0.1, + eos_token_id=tokenizer_2.eos_token_id, + pad_token_id=tokenizer_2.pad_token_id, ) -# One-line postprocess +#Post-processing def postprocess(text: str) -> str: t = text.strip() for sep in ["\n", ". 
", " "]: @@ -64,14 +64,17 @@ def postprocess(text: str) -> str: break return t.strip().strip(":").strip() -# Agent 2 +#Agent 2 def agent2(q: str) -> str: prompt = f"You are a concise Q&A assistant.\n\n{q}\n" inputs = tokenizer_2(prompt, return_tensors="pt") + if has_cuda: inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): out = model_2.generate(**inputs, **GEN_B) + prompt_len = inputs["input_ids"].shape[1] result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) print(result) diff --git a/llm/src/agents/llm_b_jetson.py b/llm/src/agents/llm_b_jetson.py index 40461ed..b57e157 100644 --- a/llm/src/agents/llm_b_jetson.py +++ b/llm/src/agents/llm_b_jetson.py @@ -1,57 +1,47 @@ -# import torch -# from transformers import AutoModelForCausalLM, AutoTokenizer - -# hf_auth = "" - -# model_id = "meta-llama/Llama-3.2-1B" - -# has_cuda = torch.cuda.is_available() -# device = torch.device("cuda" if has_cuda else "cpu") -# compute_dtype = torch.float16 if has_cuda else torch.float32 - -# common = dict( -# low_cpu_mem_usage=True, -# attn_implementation="eager", -# ) - -# tok_kwargs = dict(use_fast=True) -# if hf_auth: -# tok_kwargs["token"] = hf_auth - -# tokenizer = AutoTokenizer.from_pretrained(model_id, **tok_kwargs) -# if tokenizer.pad_token_id is None: -# tokenizer.pad_token = tokenizer.eos_token - -# mp_kwargs = dict(torch_dtype=compute_dtype, **common) -# if hf_auth: -# mp_kwargs["token"] = hf_auth - -# model = AutoModelForCausalLM.from_pretrained(model_id, **mp_kwargs) -# model.to(device) -# model.eval() - -# GEN = dict( -# max_new_tokens=64, -# do_sample=True, -# temperature=0.7, -# top_p=0.95, -# eos_token_id=tokenizer.eos_token_id, -# pad_token_id=tokenizer.pad_token_id, -# ) - -# def agent2(q: str) -> str: -# prompt = f"You are a concise Q&A assistant.\n\n{q}\n" -# inputs = tokenizer(prompt, return_tensors="pt").to(device) -# with torch.inference_mode(): -# out = model.generate(**inputs, **GEN) -# gen = out[0, inputs["input_ids"].shape[1]:] -# return tokenizer.decode(gen, skip_special_tokens=True).strip() - -# if __name__ == "__main__": -# question = "What is the capital of Japan?" 
-# print(agent2(question)) - - +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + + +# Model ID +model_id = "meta-llama/Llama-3.2-1B" + +# Check GPU availability +has_cuda = torch.cuda.is_available() +device = torch.device("cuda" if has_cuda else "cpu") +compute_dtype = torch.float16 if has_cuda else torch.float32 + + +common = dict( + low_cpu_mem_usage=True, + attn_implementation="eager", +) + +#Load tokenizer and the token automatically used from CLI login +tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True) +if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + +#Load model +mp_kwargs = dict(torch_dtype=compute_dtype, **common) +model = AutoModelForCausalLM.from_pretrained(model_id, **mp_kwargs) +model.to(device) +model.eval() + +#Generation +GEN = dict( + max_new_tokens=64, + do_sample=True, + temperature=0.7, + top_p=0.95, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id, +) + +#Agent 2 def agent2(q: str) -> str: - - return "Hello this is jetson" \ No newline at end of file + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer(prompt, return_tensors="pt").to(device) + with torch.inference_mode(): + out = model.generate(**inputs, **GEN) + gen = out[0, inputs["input_ids"].shape[1]:] + return tokenizer.decode(gen, skip_special_tokens=True).strip() diff --git a/llm/src/agents/llm_b_m2.py b/llm/src/agents/llm_b_m2.py index 45bad45..adf4e15 100644 --- a/llm/src/agents/llm_b_m2.py +++ b/llm/src/agents/llm_b_m2.py @@ -1,10 +1,10 @@ import torch from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig -hf_auth = "add your token here" - +#Model model_id_2 = "google/gemma-3-270m" +#Device setup has_cuda = torch.cuda.is_available() has_mps = torch.backends.mps.is_available() @@ -18,16 +18,16 @@ device = torch.device("cpu") compute_dtype = torch.float32 - +#Common model kwargs common = dict( low_cpu_mem_usage=True, - attn_implementation="eager", + attn_implementation="eager" ) -#4-bit on CUDA if the device has it +#4-bit quantization on CUDA if available if has_cuda: try: - import bitsandbytes as bnb + import bitsandbytes as bnb common["quantization_config"] = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", @@ -41,25 +41,21 @@ else: common["device_map"] = None -# Tokenizer -tok_kwargs = dict(use_fast=True) -if hf_auth: - tok_kwargs["token"] = hf_auth -tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, **tok_kwargs) +#Tokenizer and the token automatically used if logged in via CLI +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, use_fast=True) if tokenizer_2.pad_token_id is None: tokenizer_2.pad_token = tokenizer_2.eos_token # Model mp_kwargs = dict(dtype=compute_dtype, **common) -if hf_auth: - mp_kwargs["token"] = hf_auth - model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, **mp_kwargs) -if not has_cuda: + + +if not has_cuda: model_2.to(device) model_2.eval() -# Greedy decoding +# Generation GEN_B = dict( max_new_tokens=32, do_sample=True, @@ -93,10 +89,4 @@ def agent2(q: str) -> str: prompt_len = inputs["input_ids"].shape[1] result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) print(result) - return postprocess(result) - -# def main(): -# agent2("what is AI?") - -# if __name__ == "__main__": -# main() \ No newline at end of file + return postprocess(result) \ No newline at end of file diff --git a/llm/src/llm_base_class.lf b/llm/src/llm_base_class.lf index d1eae4e..ae6f1c7 100644 --- 
a/llm/src/llm_base_class.lf +++ b/llm/src/llm_base_class.lf @@ -1,52 +1,5 @@ target Python -### Reactor for handling user keyboard input -reactor KeyboardInput { - state th - state terminate = False - state eof = False - state buffer = "" - - physical action line - output prompt - output quit - - reaction(startup) -> line {= - def reader(): - while not self.terminate: - - s = input("Enter the quiz question\n") - if s == "": - self.eof = True - line.schedule(0) - break - elif s.lower().strip() == "quit": - self.eof = True - line.schedule(0) - break - else: - self.buffer = s - line.schedule(1) - self.th = threading.Thread(target=reader, daemon=True) - self.th.start() - =} - - reaction(line) -> prompt, quit {= - if self.eof: - quit.set() - environment().sync_shutdown() - else: - prompt.set(self.buffer) - =} - - reaction(shutdown) {= - self.terminate = True - if self.th and self.th.is_alive(): - self.th.join() - =} -} - - ### Reactor for calling agent 1 reactor LlmA { @@ -80,8 +33,7 @@ reactor LlmA { } - -// ### Reactor for calling agent 2 +### Reactor for calling agent 2 reactor LlmB { state th state running = False @@ -112,35 +64,62 @@ reactor LlmB { } +### Reactor for Judge +reactor Judge { + state th + state terminate = False + state eof = False + state buffer = "" - -// ###Judge reactor to determine which agent responds first -reactor Judge{ - input query + output ask + output quit input llma input llmb - output ask state waiting = False state logical_base_time = 0 state physical_base_time = 0 state winner = "" + physical action line logical action timeout(60 sec) - reaction(query) -> timeout, ask {= - self.waiting = True - self.winner = "" - self.logical_base_time = lf.time.logical_elapsed() - self.physical_base_time = lf.time.physical_elapsed() - timeout.schedule(0) - print(f"\n\n\nQuery: {query.value}\n") - print("waiting...\n") - ask.set(query.value) + reaction(startup) -> line {= + def reader(): + while not self.terminate: + s = input("Enter the quiz question\n") + if s == "": + self.eof = True + line.schedule(0) + break + elif s.lower().strip() == "quit": + self.eof = True + line.schedule(0) + break + else: + self.buffer = s + line.schedule(1) + self.th = threading.Thread(target=reader, daemon=True) + self.th.start() + =} + + reaction(line) -> ask, quit, timeout {= + if self.eof: + quit.set() + environment().sync_shutdown() + else: + self.waiting = True + self.winner = "" + self.logical_base_time = lf.time.logical_elapsed() + self.physical_base_time = lf.time.physical_elapsed() + timeout.schedule(0) + print(f"\n\n\nQuery: {self.buffer}\n") + print("waiting...\n") + ask.set(self.buffer) =} reaction(llma) {= - if not self.waiting: + if not self.waiting: return self.waiting = False logical_now = lf.time.logical_elapsed() @@ -152,7 +131,7 @@ reactor Judge{ =} reaction(llmb) {= - if not self.waiting: + if not self.waiting: return self.waiting = False logical_now = lf.time.logical_elapsed() @@ -164,7 +143,7 @@ reactor Judge{ =} reaction(timeout) {= - if not self.waiting: + if not self.waiting: return self.waiting = False logical_now = lf.time.logical_elapsed() @@ -173,4 +152,10 @@ reactor Judge{ physical_ms = int((physical_now - self.physical_base_time) / 1000000) print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms") =} -} + + reaction(shutdown) {= + self.terminate = True + if self.th and self.th.is_alive(): + self.th.join() + =} +} \ No newline at end of file diff --git a/llm/src/llm_quiz_game.lf b/llm/src/llm_quiz_game.lf index 62fab15..9f05ce6 100644 --- 
a/llm/src/llm_quiz_game.lf +++ b/llm/src/llm_quiz_game.lf @@ -1,28 +1,23 @@ -### llm.py file needs to be in the same directory target Python { keepalive: true, files: ["agents/llm.py"] } -import KeyboardInput from "llm_base_class.lf" + import LlmA from "llm_base_class.lf" import LlmB from "llm_base_class.lf" import Judge from "llm_base_class.lf" preamble {= import threading - import time - from llm import agent1, agent2 + import time + from llm import agent1, agent2 =} main reactor { llma_response = new LlmA() llmb_response = new LlmB() - keyboard = new KeyboardInput() j = new Judge() - keyboard.prompt -> j.query j.ask -> llma_response.user_in j.ask -> llmb_response.user_in llma_response.answer -> j.llma llmb_response.answer -> j.llmb -} - - +} \ No newline at end of file From ec73fce0fcd6b62de5e8f34919e99c3200c8e5bd Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:00:03 -0700 Subject: [PATCH 33/54] Updated the README.md for federated execution --- llm/src/federated/README.md | 67 +++++++++++++++++++++++++++++++------ 1 file changed, 57 insertions(+), 10 deletions(-) diff --git a/llm/src/federated/README.md b/llm/src/federated/README.md index 0055843..99adf09 100644 --- a/llm/src/federated/README.md +++ b/llm/src/federated/README.md @@ -1,16 +1,63 @@ -# LLM Demo (Federated Execution) +# LLM Demo (Federated Execution) Overview -# Overview This is a quiz-style game between two LLM agents using federated execution. For each user question asked to the Judge, both agents answer in parallel. The Judge announces whichever answer arrives first (or a timeout if neither responds within 60 sec), and prints per-question elapsed logical and physical times. There are three federates (federate__llma, federate__llmb, federate__j) and an RTI. # Pre-requisites -You need Python installed, as llm_a.py, llm_b.py, llm_b_m2.py and llm_b_jetson.py are written in Python. # any version >= 3.10 +You need Python >= 3.10 installed. ## Library Dependencies To run this project, there are dependencies required which are in [requirements.txt](requirements.txt) file. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. While newer versions of other dependencies may work, the specific versions listed below have been tested and are recommended for optimal performance. -It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies. +It is highly recommended to create a Python virtual environment or a Conda environment to manage dependencies. \ +To create the a virtual environment follow the steps below. + +### Step 1: Creating environment +Replace this <> with the environment name +``` +python3 -m venv +source /bin/activate +``` +or +``` +conda create -n +conda activate +``` +### Step 2: Installing the required packages +Check if pip is installed: +``` +pip --version +``` +If it is not installed: +``` +python -m pip install --upgrade pip +``` +Run this command to install the packages from the [requirements.txt](requirements.txt) file: +``` +pip install -r requirements.txt +``` +For installing torch: + +1. For devices without GPU +``` +pip install torch torchvision +``` +2. 
For devices with GPU + Checking the CUDA version run this command: + ``` + nvidia-smi + ``` + Look for the line "CUDA Version" as shown in the image: \ + + + With the correct version install PyTorch from [PyTorch](https://pytorch.org/get-started/locally/) by selecting the right correct OS and compute platform as shown in the image below for Linux system with CUDA version 12.8: \ + +### Step 3: Model Dependencies +- **Pre-trained Models used in the agents/llm_a.py and agents/llm_b.py**: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) , [meta-llama/Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) \ +**Note:** Follow the steps below to obtain the access and authentication key for the hugging face models. +1. Create the user access token and follow the steps shown on the official documentation: [User access tokens](https://huggingface.co/docs/hub/en/security-tokens) +2. Log in using the Hugging Face CLI by running huggingface-cli login. Please refer to the official documentation for step-by-step instructions - [HuggingFace CLI](https://huggingface.co/docs/huggingface_hub/en/guides/cli) +3. For the Llama Models you will require access to use the models if you are using it for the first time. Open these links and apply for accessing the models ([meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), [meta-llama/Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)) ## System Requirements @@ -18,16 +65,13 @@ To ensure optimal performance, the following hardware and software requirements **Note:** To replicate this model, you can use any equivalent hardware that meets the computational requirements. ### Hardware Requirements +The demo was tested with the following hardware setup. - **GPU**: NVIDIA RTX A6000 ### Software Requirements -- **Python** (Ensure Python is installed) +- **OS**: Linux +- **Python** - **CUDA Version**: 12.8 -- **NVIDIA-SMI**: For monitoring GPU performance and memory utilization - -### Model Dependencies -- **Pre-trained Models**: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) [meta-llama/Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) -**Note:** Please access and use the pre-trained models, authentication keys must be obtained from the [Hugging Face repository](https://huggingface.co/settings/tokens). Ensure you have a valid API token and configure authentication. Make sure the environment is properly configured to use CUDA for optimal GPU acceleration. @@ -105,3 +149,6 @@ Answer: Seoul. # Contributors +- Deeksha Prahlad (dprahlad@asu.edu), Ph.D. 
student at Arizona State University +- Hokeun Kim (hokeun@asu.edu, https://hokeun.github.io/), Assistant professor at Arizona State University + From 03a100751f827786d6fe36c2c51069713bc1b6a2 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Thu, 16 Oct 2025 19:15:03 -0700 Subject: [PATCH 34/54] Corrected the path of the python files --- llm/src/federated/llm_game_federated.lf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/src/federated/llm_game_federated.lf b/llm/src/federated/llm_game_federated.lf index 6f4f1a8..7e6b961 100644 --- a/llm/src/federated/llm_game_federated.lf +++ b/llm/src/federated/llm_game_federated.lf @@ -1,5 +1,5 @@ ### llm.py file needs to be in the same directory -target Python { keepalive: true, files: ["agents/llm_a.py", "agents/llm_b.py" ] } #"llm_b.py" +target Python { keepalive: true, files: ["../../../src/agents/llm_a.py", "../../../src/agents/llm_b.py" ] } #"llm_b.py" import LlmA, LlmB, Judge from "llm_base_class_federate.lf" From 050fe9f6eae0e237a598d2a5d2c2b4e51685488b Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:17:49 -0700 Subject: [PATCH 35/54] Corrected the paths of the images in the README.md --- llm/src/federated/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llm/src/federated/README.md b/llm/src/federated/README.md index 99adf09..ef8970b 100644 --- a/llm/src/federated/README.md +++ b/llm/src/federated/README.md @@ -48,10 +48,10 @@ pip install torch torchvision nvidia-smi ``` Look for the line "CUDA Version" as shown in the image: \ - + With the correct version install PyTorch from [PyTorch](https://pytorch.org/get-started/locally/) by selecting the right correct OS and compute platform as shown in the image below for Linux system with CUDA version 12.8: \ - + ### Step 3: Model Dependencies - **Pre-trained Models used in the agents/llm_a.py and agents/llm_b.py**: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) , [meta-llama/Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) \ **Note:** Follow the steps below to obtain the access and authentication key for the hugging face models. From 8634b49cb784f42cf83d92f9c9526df0aac7a8a7 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:18:46 -0700 Subject: [PATCH 36/54] added the contributors name README.md --- llm/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/llm/README.md b/llm/README.md index 39fc3f0..f87cf44 100644 --- a/llm/README.md +++ b/llm/README.md @@ -126,3 +126,5 @@ Answer: Seoul. # Contributors +- Deeksha Prahlad (dprahlad@asu.edu), Ph.D. 
student at Arizona State University +- Hokeun Kim (hokeun@asu.edu, https://hokeun.github.io/), Assistant professor at Arizona State University From 2e73975d4863c7543ba1b67d71c07bc501bd3e25 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Fri, 17 Oct 2025 09:19:34 -0700 Subject: [PATCH 37/54] Removed torch and torchvision since they are dependent on the device --- llm/requirements.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/llm/requirements.txt b/llm/requirements.txt index c126cad..c8a18f7 100644 --- a/llm/requirements.txt +++ b/llm/requirements.txt @@ -2,6 +2,4 @@ accelerate transformers tokenizers bitsandbytes>=0.43.0 -torch -torchvision From 3ccb0f24c66e93650c7c2110d379e7d3b2482b93 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Fri, 17 Oct 2025 09:23:15 -0700 Subject: [PATCH 38/54] corrected few things on the README regarding the different reactors --- llm/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/llm/README.md b/llm/README.md index f87cf44..523042d 100644 --- a/llm/README.md +++ b/llm/README.md @@ -1,6 +1,6 @@ # LLM Demo Overview -This is a quiz-style game between two LLM agents. For each user question typed at the keyboard, both agents answer in parallel. The Judge announces whichever answer arrives first (or a timeout if neither responds within 60 sec), and prints per-question elapsed logical and physical times. +This is a quiz-style game between two LLM agents. For each user question typed at the keyboard for the judge, both agents answer in parallel. The Judge announces whichever answer arrives first (or a timeout if neither responds within 60 sec), and prints per-question elapsed logical and physical times. # Directory Structure - [federated](src/federated/) - Directory for federated versions of LLM demos. @@ -80,8 +80,8 @@ The demo was tested with the following hardware setup. Make sure the environment is properly configured to use CUDA for optimal GPU acceleration. # Files and directories in this repository - - **`llm_base_class.lf`** - Contains the base reactors LlmA, LlmB, Keyboard and Judge.. - - **`llm_quiz_game.lf`** - Lingua Franca program that defines the quiz game reactors (Keyboard input, LLM agent A, LLM agent B and Judge). + - **`llm_base_class.lf`** - Contains the base reactors LlmA, LlmB, and Judge. + - **`llm_quiz_game.lf`** - Lingua Franca program that defines the quiz game reactors (LLM agent A, LLM agent B and Judge). # Execution Workflow From ae288630f44c1685077b47741d057233d39e20e3 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Fri, 17 Oct 2025 11:00:18 -0700 Subject: [PATCH 39/54] Updated the required python version in the README.md --- llm/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/README.md b/llm/README.md index 523042d..6c8d8b3 100644 --- a/llm/README.md +++ b/llm/README.md @@ -8,7 +8,7 @@ This is a quiz-style game between two LLM agents. For each user question typed a # Pre-requisites -You need Python installed, as llm.py is written in Python. +You need Python >= 3.10 installed. ## Library Dependencies To run this project, there are dependencies required which are in [requirements.txt](requirements.txt) file. The model used in this repository has been quantized using 4-bit precision (bnb_4bit) and relies on bitsandbytes for efficient matrix operations and memory optimization. So specific versions of bitsandbytes, torch, and torchvision are mandatory for compatibility. 
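For reference, a minimal sketch of the 4-bit (bnb_4bit) loading path that this compatibility note refers to; it mirrors the configuration used by the agent scripts in this series (the Llama-2-7b-chat-hf model, NF4 quantization via bitsandbytes) and assumes transformers, accelerate, bitsandbytes, and a CUDA-capable torch build are installed:

```
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "meta-llama/Llama-2-7b-chat-hf"  # same 7B chat model used by agent A
has_cuda = torch.cuda.is_available()
dtype = torch.bfloat16 if has_cuda else torch.float32

# 4-bit NF4 quantization is only meaningful on CUDA; fall back to full precision otherwise.
bnb_config = None
if has_cuda:
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=dtype,
    )

tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto" if has_cuda else None,
    torch_dtype=dtype,
    low_cpu_mem_usage=True,
    quantization_config=bnb_config,
)
model.eval()
```

This is why the pinned bitsandbytes version matters: with load_in_4bit set, the 4-bit kernels are delegated to bitsandbytes when the weights are loaded, so an incompatible build typically fails at from_pretrained time rather than at generation time.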
From b09a9c34a3626b61121888f5f67ac833bde09434 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Fri, 17 Oct 2025 11:48:45 -0700 Subject: [PATCH 40/54] Added a command to check if requirements are installed README.md --- llm/README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/llm/README.md b/llm/README.md index 6c8d8b3..7cfd16e 100644 --- a/llm/README.md +++ b/llm/README.md @@ -36,10 +36,15 @@ If it is not installed: ``` python -m pip install --upgrade pip ``` -Run this command to install the packages from the [requirements.txt](requirements.txt) file: +Run this command to install the packages from the [requirements.txt](requirements.txt) file:\ +**Note**: Since we are using LLMs with 7B and 70B parameters it is recommended to have a device with GPU support. ``` pip install -r requirements.txt ``` +To check if all the requirements are installed, run: +``` +pip list | grep -E "transformers|accelerate|tokenizers|bitsandbytes" +``` For installing torch: 1. For devices without GPU From 042317fa086237b8ee1d95c55d8e896c5a7bcb23 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Fri, 17 Oct 2025 14:05:35 -0700 Subject: [PATCH 41/54] added the common environment name README.md --- llm/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/llm/README.md b/llm/README.md index 7cfd16e..c97f213 100644 --- a/llm/README.md +++ b/llm/README.md @@ -17,15 +17,15 @@ It is highly recommended to create a Python virtual environment or a Conda envir To create the a virtual environment follow the steps below. ### Step 1: Creating environment -Replace this <> with the environment name ``` -python3 -m venv -source /bin/activate +python3 -m venv llm +source llm/bin/activate ``` +For activating the environment everytime use "source llm/bin/activate". 
or ``` -conda create -n -conda activate +conda create -n llm +conda activate llm ``` ### Step 2: Installing the required packages Check if pip is installed: From 8a71d549fe3bff383f330bbebb6e20306b336d85 Mon Sep 17 00:00:00 2001 From: Deeksha-20-99 Date: Tue, 11 Nov 2025 09:21:42 -0700 Subject: [PATCH 42/54] This is the .py file for running the llm_quiz on laptops --- llm/src/agents/llm_small.py | 89 +++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 llm/src/agents/llm_small.py diff --git a/llm/src/agents/llm_small.py b/llm/src/agents/llm_small.py new file mode 100644 index 0000000..a02de1d --- /dev/null +++ b/llm/src/agents/llm_small.py @@ -0,0 +1,89 @@ +### Import Libraries +import transformers +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig +from torch import cuda, bfloat16 + + +### Model to be chosen to act as an agent +model_id = "microsoft/Phi-3.5-mini-instruct" +model_id_2 = "EleutherAI/pythia-70m" + +### To check if there is GPU and convert it into float 16 +has_cuda = torch.cuda.is_available() +dtype = torch.bfloat16 if has_cuda else torch.float32 + +### To convert the model into 4bit quantization +bnb_config = None +### if there is cuda then the model is converted to 4bit quantization +if has_cuda: + try: + import bitsandbytes as bnb + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=dtype, + ) + except Exception: + bnb_config = None + +### calling pre-trained tokenizer +tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True) +tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2, use_fast=True) +for tok in (tokenizer, tokenizer_2): + if tok.pad_token_id is None: + tok.pad_token = tok.eos_token + +### since both the models have same device map and using 4bit quantization for both +common = dict( + device_map="auto" if has_cuda else None, + dtype=dtype, + low_cpu_mem_usage=True, +) +if bnb_config is not None: + common["quantization_config"] = bnb_config + +### calling pre-trained model +model = AutoModelForCausalLM.from_pretrained(model_id, **common) +model_2 = AutoModelForCausalLM.from_pretrained(model_id_2, **common) +model.eval(); model_2.eval() + + +### arguments for both the models +GEN_A = dict(max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id) +GEN_B = dict(max_new_tokens=24, do_sample=False, temperature=0.1, + eos_token_id=tokenizer_2.eos_token_id, pad_token_id=tokenizer_2.pad_token_id) + +###to resturn only one line answers +def postprocess(text: str) -> str: + t = text.strip() + for sep in ["\n", ". 
", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + return t.strip().strip(":").strip() + +###Calling agent1 from .lf code +def agent1(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer(prompt, return_tensors="pt") + if has_cuda: inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = model.generate(**inputs, **GEN_A) + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer.decode(out[0][prompt_len:], skip_special_tokens=True) + return postprocess(result) + +###Calling agent2 from .lf code +def agent2(q: str) -> str: + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + inputs = tokenizer_2(prompt, return_tensors="pt") + if has_cuda: inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = model_2.generate(**inputs, **GEN_B) + prompt_len = inputs["input_ids"].shape[1] + result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) + return postprocess(result) \ No newline at end of file From 06f72084cc6f997513e278de0b1c054c11b6591a Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad Date: Thu, 27 Nov 2025 12:58:32 -0700 Subject: [PATCH 43/54] Added changed to remove DNET warnings --- llm/src/federated/llm_base_class_federate.lf | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/llm/src/federated/llm_base_class_federate.lf b/llm/src/federated/llm_base_class_federate.lf index 57171ed..b1df00d 100644 --- a/llm/src/federated/llm_base_class_federate.lf +++ b/llm/src/federated/llm_base_class_federate.lf @@ -8,8 +8,8 @@ reactor LlmA { state ready = False input user_in - physical action done - physical action notify_ready + logical action done + logical action notify_ready output answer output ready_out @@ -64,8 +64,8 @@ reactor LlmB { state ready = False input user_in - physical action done - physical action notify_ready + logical action done + logical action notify_ready output answer output ready_out @@ -126,8 +126,8 @@ reactor Judge { input ready_b state a_ready = False state b_ready = False - physical action line - physical action tick + logical action line + logical action tick logical action timeout(60 sec) output ask input llma From b6918e2bec3b14efa87b049935d4bae4a203a9e7 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad Date: Thu, 27 Nov 2025 16:18:07 -0700 Subject: [PATCH 44/54] Adding a newline for the code --- llm/src/agents/llm.py | 2 +- llm/src/agents/llm_a.py | 2 +- llm/src/agents/llm_b.py | 2 +- llm/src/agents/llm_b_jetson.py | 1 + llm/src/agents/llm_b_m2.py | 2 +- llm/src/agents/llm_small.py | 2 +- llm/src/federated/llm_base_class_federate.lf | 2 +- llm/src/federated/llm_game_federated.lf | 1 + llm/src/llm_base_class.lf | 2 +- llm/src/llm_quiz_game.lf | 2 +- 10 files changed, 10 insertions(+), 8 deletions(-) diff --git a/llm/src/agents/llm.py b/llm/src/agents/llm.py index 1d88658..4ef69f2 100644 --- a/llm/src/agents/llm.py +++ b/llm/src/agents/llm.py @@ -86,4 +86,4 @@ def agent2(q: str) -> str: out = model_2.generate(**inputs, **GEN_B) prompt_len = inputs["input_ids"].shape[1] result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) - return postprocess(result) \ No newline at end of file + return postprocess(result) diff --git a/llm/src/agents/llm_a.py b/llm/src/agents/llm_a.py index 0e888bc..0126e48 100644 --- a/llm/src/agents/llm_a.py +++ b/llm/src/agents/llm_a.py @@ -75,4 +75,4 @@ def agent1(q: str) -> str: prompt_len = inputs["input_ids"].shape[1] result = tokenizer.decode(out[0][prompt_len:], 
skip_special_tokens=True) print(result) - return postprocess(result) \ No newline at end of file + return postprocess(result) diff --git a/llm/src/agents/llm_b.py b/llm/src/agents/llm_b.py index 621d43d..9ae257f 100644 --- a/llm/src/agents/llm_b.py +++ b/llm/src/agents/llm_b.py @@ -78,4 +78,4 @@ def agent2(q: str) -> str: prompt_len = inputs["input_ids"].shape[1] result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) print(result) - return postprocess(result) \ No newline at end of file + return postprocess(result) diff --git a/llm/src/agents/llm_b_jetson.py b/llm/src/agents/llm_b_jetson.py index b57e157..8ac042f 100644 --- a/llm/src/agents/llm_b_jetson.py +++ b/llm/src/agents/llm_b_jetson.py @@ -45,3 +45,4 @@ def agent2(q: str) -> str: out = model.generate(**inputs, **GEN) gen = out[0, inputs["input_ids"].shape[1]:] return tokenizer.decode(gen, skip_special_tokens=True).strip() + diff --git a/llm/src/agents/llm_b_m2.py b/llm/src/agents/llm_b_m2.py index adf4e15..aa699ec 100644 --- a/llm/src/agents/llm_b_m2.py +++ b/llm/src/agents/llm_b_m2.py @@ -89,4 +89,4 @@ def agent2(q: str) -> str: prompt_len = inputs["input_ids"].shape[1] result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) print(result) - return postprocess(result) \ No newline at end of file + return postprocess(result) diff --git a/llm/src/agents/llm_small.py b/llm/src/agents/llm_small.py index a02de1d..0a1c985 100644 --- a/llm/src/agents/llm_small.py +++ b/llm/src/agents/llm_small.py @@ -86,4 +86,4 @@ def agent2(q: str) -> str: out = model_2.generate(**inputs, **GEN_B) prompt_len = inputs["input_ids"].shape[1] result = tokenizer_2.decode(out[0][prompt_len:], skip_special_tokens=True) - return postprocess(result) \ No newline at end of file + return postprocess(result) diff --git a/llm/src/federated/llm_base_class_federate.lf b/llm/src/federated/llm_base_class_federate.lf index b1df00d..b1e15b2 100644 --- a/llm/src/federated/llm_base_class_federate.lf +++ b/llm/src/federated/llm_base_class_federate.lf @@ -241,4 +241,4 @@ reactor Judge { if self.th and self.th.is_alive(): self.th.join() =} -} \ No newline at end of file +} diff --git a/llm/src/federated/llm_game_federated.lf b/llm/src/federated/llm_game_federated.lf index 7e6b961..96d0f5b 100644 --- a/llm/src/federated/llm_game_federated.lf +++ b/llm/src/federated/llm_game_federated.lf @@ -27,3 +27,4 @@ federated reactor llm_game_federated at 10.218.100.95 { } + diff --git a/llm/src/llm_base_class.lf b/llm/src/llm_base_class.lf index ae6f1c7..282e3bb 100644 --- a/llm/src/llm_base_class.lf +++ b/llm/src/llm_base_class.lf @@ -158,4 +158,4 @@ reactor Judge { if self.th and self.th.is_alive(): self.th.join() =} -} \ No newline at end of file +} diff --git a/llm/src/llm_quiz_game.lf b/llm/src/llm_quiz_game.lf index 9f05ce6..e5ebf20 100644 --- a/llm/src/llm_quiz_game.lf +++ b/llm/src/llm_quiz_game.lf @@ -20,4 +20,4 @@ main reactor { j.ask -> llmb_response.user_in llma_response.answer -> j.llma llmb_response.answer -> j.llmb -} \ No newline at end of file +} From 45216baa4820a5c293ef5456fcc7a2b441840af5 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad Date: Thu, 27 Nov 2025 18:36:33 -0700 Subject: [PATCH 45/54] fixed the warnings for llm_quiz_game --- llm/src/llm_base_class.lf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/llm/src/llm_base_class.lf b/llm/src/llm_base_class.lf index 282e3bb..82e374b 100644 --- a/llm/src/llm_base_class.lf +++ b/llm/src/llm_base_class.lf @@ -8,7 +8,7 @@ reactor LlmA { state out_buffer = "" 
input user_in - physical action done + logical action done output answer @@ -41,7 +41,7 @@ reactor LlmB { input user_in output answer - physical action done + logical action done reaction(user_in)->done{= if self.running: @@ -81,7 +81,7 @@ reactor Judge { state physical_base_time = 0 state winner = "" - physical action line + logical action line logical action timeout(60 sec) reaction(startup) -> line {= From d22adadcc4150b8f883385688b260bc429fbfbab Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad Date: Fri, 28 Nov 2025 17:50:29 -0700 Subject: [PATCH 46/54] re-organized the code to remove dependancy of llm.py files --- llm/src/federated/llm_base_class_federate.lf | 301 ++++++++++++------- llm/src/federated/llm_game_federated.lf | 26 +- llm/src/llm_base_class.lf | 210 +++++++++++-- llm/src/llm_quiz_game.lf | 9 +- 4 files changed, 377 insertions(+), 169 deletions(-) diff --git a/llm/src/federated/llm_base_class_federate.lf b/llm/src/federated/llm_base_class_federate.lf index b1e15b2..cc0a0ee 100644 --- a/llm/src/federated/llm_base_class_federate.lf +++ b/llm/src/federated/llm_base_class_federate.lf @@ -1,52 +1,96 @@ -target Python +target Python { keepalive: true } + +preamble {= + import threading + import torch + from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig +=} -### Reactor for calling agent 1 reactor LlmA { state th state running = False state out_buffer = "" state ready = False - input user_in - logical action done - logical action notify_ready + input user_in output answer output ready_out + logical action done - reaction(startup) -> notify_ready {= - import os, sys, importlib.util, threading, traceback - act = notify_ready - def _load(): - try: - here = os.path.dirname(__file__) - if here not in sys.path: sys.path.insert(0, here) - from llm_a import agent1 - act.schedule(1) - except Exception as e: - print("[LlmA] Preload failed:", e, flush=True) - traceback.print_exc() - threading.Thread(target=_load, daemon=True).start() - =} + reaction(startup) -> ready_out {= + print("[LlmA] Loading 7B", flush=True) + + self.has_cuda = torch.cuda.is_available() + self.dtype = torch.bfloat16 if self.has_cuda else torch.float32 + + model_id = "meta-llama/Llama-2-7b-chat-hf" + + self.tokenizer_a = AutoTokenizer.from_pretrained(model_id, use_fast=True) + if self.tokenizer_a.pad_token_id is None: + self.tokenizer_a.pad_token = self.tokenizer_a.eos_token + + common_a = dict( + device_map="auto" if self.has_cuda else None, + torch_dtype=self.dtype, + low_cpu_mem_usage=True, + ) + + try: + import bitsandbytes as bnb + common_a["quantization_config"] = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=self.dtype, + ) + except Exception: + pass - reaction(notify_ready) -> ready_out {= + self.model_a = AutoModelForCausalLM.from_pretrained(model_id, **common_a) + self.model_a.eval() + + self.GEN_A = dict( + max_new_tokens=24, + do_sample=False, + temperature=0.1, + eos_token_id=self.tokenizer_a.eos_token_id, + pad_token_id=self.tokenizer_a.pad_token_id, + ) + + print("[LlmA] Ready.", flush=True) self.ready = True ready_out.set(True) =} reaction(user_in) -> done {= - import threading - if not self.ready: return - if self.running: return + if not self.ready: + return + if self.running: + return self.running = True - q = user_in.value - from llm_a import agent1 - def agentA(): + query = user_in.value + + def worker(): try: - self.out_buffer = agent1(q) + prompt = f"You are a concise Q&A 
assistant.\n\n{query}\n" + inputs = self.tokenizer_a(prompt, return_tensors="pt") + if self.has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = self.model_a.generate(**inputs, **self.GEN_A) + plen = inputs["input_ids"].shape[1] + txt = self.tokenizer_a.decode(out[0][plen:], skip_special_tokens=True) + t = txt.strip() + for sep in ["\n", ". ", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + self.out_buffer = t.strip().strip(":").strip() finally: - try: done.schedule(5) - except Exception as e: print("[LlmA] schedule failed:", e, flush=True) - self.th = threading.Thread(target=agentA, daemon=True) + done.schedule(0) + + self.th = threading.Thread(target=worker, daemon=True) self.th.start() =} @@ -56,53 +100,91 @@ reactor LlmA { =} } -### Reactor for calling agent 2 reactor LlmB { state th state running = False state out_buffer = "" state ready = False - input user_in - logical action done - logical action notify_ready + input user_in output answer output ready_out + logical action done - reaction(startup) -> notify_ready {= - import os, sys, importlib.util, threading, traceback - act = notify_ready - def _load(): - try: - here = os.path.dirname(__file__) - if here not in sys.path: sys.path.insert(0, here) - from llm_b import agent2 - act.schedule(1) - except Exception as e: - print("[LlmB] Preload failed:", e, flush=True) - traceback.print_exc() - threading.Thread(target=_load, daemon=True).start() - =} + reaction(startup) -> ready_out {= + print("[LlmB] Loading 70B", flush=True) - reaction(notify_ready) -> ready_out {= + self.has_cuda = torch.cuda.is_available() + self.dtype = torch.bfloat16 if self.has_cuda else torch.float32 + + model_id = "meta-llama/Llama-2-70b-chat-hf" + + self.tokenizer_b = AutoTokenizer.from_pretrained(model_id, use_fast=True) + if self.tokenizer_b.pad_token_id is None: + self.tokenizer_b.pad_token = self.tokenizer_b.eos_token + + common_b = dict( + device_map="auto" if self.has_cuda else None, + torch_dtype=self.dtype, + low_cpu_mem_usage=True, + ) + + try: + import bitsandbytes as bnb + common_b["quantization_config"] = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=self.dtype, + ) + except Exception: + pass + + self.model_b = AutoModelForCausalLM.from_pretrained(model_id, **common_b) + self.model_b.eval() + + self.GEN_B = dict( + max_new_tokens=24, + do_sample=False, + temperature=0.1, + eos_token_id=self.tokenizer_b.eos_token_id, + pad_token_id=self.tokenizer_b.pad_token_id, + ) + + print("[LlmB] Ready.", flush=True) self.ready = True ready_out.set(True) =} reaction(user_in) -> done {= - import threading - if not self.ready: return - if self.running: return + if not self.ready: + return + if self.running: + return self.running = True - q = user_in.value - from llm_b import agent2 - def agentB(): + query = user_in.value + + def worker(): try: - self.out_buffer = agent2(q) + prompt = f"You are a concise Q&A assistant.\n\n{query}\n" + inputs = self.tokenizer_b(prompt, return_tensors="pt") + if self.has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + with torch.no_grad(): + out = self.model_b.generate(**inputs, **self.GEN_B) + plen = inputs["input_ids"].shape[1] + txt = self.tokenizer_b.decode(out[0][plen:], skip_special_tokens=True) + t = txt.strip() + for sep in ["\n", ". 
", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + self.out_buffer = t.strip().strip(":").strip() finally: - try: done.schedule(5) - except Exception as e: print("[LlmB] schedule failed:", e, flush=True) - self.th = threading.Thread(target=agentB, daemon=True) + done.schedule(0) + + self.th = threading.Thread(target=worker, daemon=True) self.th.start() =} @@ -111,7 +193,6 @@ reactor LlmB { answer.set(self.out_buffer) =} } -###Judge reactor to determine which agent responds first reactor Judge { state th @@ -122,66 +203,59 @@ reactor Judge { state waiting = False state logical_base_time = 0 state physical_base_time = 0 - input ready_a - input ready_b - state a_ready = False - state b_ready = False - logical action line - logical action tick - logical action timeout(60 sec) + state a_ready = False + state b_ready = False + + input ready_a + input ready_b + input llma + input llmb + output ask - input llma - input llmb output quit + logical action line + logical action tick + physical action timeout(60 sec) + reaction(startup) {= - print("[Judge] Waiting for models to load", flush=True) + print("[Judge] Waiting for models", flush=True) =} - reaction(ready_a)->line {= + reaction(ready_a) -> line {= self.a_ready = True if self.a_ready and self.b_ready and not self.reader_started: - import sys, threading + import threading def reader(): while not self.terminate: s = input("Enter the quiz question (or 'quit')\n") if s == "" or s.lower().strip() == "quit": self.eof = True - try: line.schedule(0) - except Exception as e: print("[Judge] schedule EOF failed:", e, flush=True) + line.schedule(0) break - else: - self.buffer = s - try: line.schedule(1) - except Exception as e: - print("[Judge] schedule line failed:", e, flush=True) - break + self.buffer = s + line.schedule(1) self.reader_started = True - print("[Judge] Models ready. You can ask questions now.", flush=True) + print("[Judge] Ready", flush=True) self.th = threading.Thread(target=reader, daemon=True) self.th.start() =} - reaction(ready_b)->line {= + reaction(ready_b) -> line {= self.b_ready = True if self.a_ready and self.b_ready and not self.reader_started: - import sys, threading + import threading def reader(): while not self.terminate: s = input("Enter the quiz question (or 'quit')\n") if s == "" or s.lower().strip() == "quit": self.eof = True - try: line.schedule(0) - except Exception as e: print("[Judge] schedule EOF failed:", e, flush=True) + line.schedule(0) break - else: - self.buffer = s - try: line.schedule(1) - except Exception as e: - print("[Judge] schedule line failed:", e, flush=True) - break + self.buffer = s + line.schedule(1) self.reader_started = True - print("[Judge] Models ready. 
You can ask questions now.", flush=True) + print("[Judge] Ready", flush=True) self.th = threading.Thread(target=reader, daemon=True) self.th.start() =} @@ -192,7 +266,7 @@ reactor Judge { environment().sync_shutdown() else: self.waiting = True - self.logical_base_time = lf.time.logical_elapsed() + self.logical_base_time = lf.time.logical_elapsed() self.physical_base_time = lf.time.physical_elapsed() timeout.schedule(0) print(f"\n\n\nQuery: {self.buffer}\n", flush=True) @@ -205,35 +279,38 @@ reactor Judge { =} reaction(llma) {= - if not self.waiting: return + if not self.waiting: + return self.waiting = False - logical_now = lf.time.logical_elapsed() - physical_now = lf.time.physical_elapsed() - logical_ms = int((logical_now - self.logical_base_time) / 1000000) - physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + ln = lf.time.logical_elapsed() + pn = lf.time.physical_elapsed() + lm = int((ln - self.logical_base_time)/1000000) + pm = int((pn - self.physical_base_time)/1000000) + print(f"Winner: LLM-A | logical {lm} ms | physical {pm} ms", flush=True) print(f"{llma.value}", flush=True) =} reaction(llmb) {= - if not self.waiting: return + if not self.waiting: + return self.waiting = False - logical_now = lf.time.logical_elapsed() - physical_now = lf.time.physical_elapsed() - logical_ms = int((logical_now - self.logical_base_time) / 1000000) - physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + ln = lf.time.logical_elapsed() + pn = lf.time.physical_elapsed() + lm = int((ln - self.logical_base_time)/1000000) + pm = int((pn - self.physical_base_time)/1000000) + print(f"Winner: LLM-B | logical {lm} ms | physical {pm} ms", flush=True) print(f"{llmb.value}", flush=True) =} reaction(timeout) {= - if not self.waiting: return + if not self.waiting: + return self.waiting = False - logical_now = lf.time.logical_elapsed() - physical_now = lf.time.physical_elapsed() - logical_ms = int((logical_now - self.logical_base_time) / 1000000) - physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + ln = lf.time.logical_elapsed() + pn = lf.time.physical_elapsed() + lm = int((ln - self.logical_base_time)/1000000) + pm = int((pn - self.physical_base_time)/1000000) + print(f"TIMEOUT (60 s) | logical {lm} ms | physical {pm} ms", flush=True) =} reaction(shutdown) {= @@ -241,4 +318,4 @@ reactor Judge { if self.th and self.th.is_alive(): self.th.join() =} -} +} \ No newline at end of file diff --git a/llm/src/federated/llm_game_federated.lf b/llm/src/federated/llm_game_federated.lf index 96d0f5b..8461eed 100644 --- a/llm/src/federated/llm_game_federated.lf +++ b/llm/src/federated/llm_game_federated.lf @@ -1,30 +1,18 @@ -### llm.py file needs to be in the same directory -target Python { keepalive: true, files: ["../../../src/agents/llm_a.py", "../../../src/agents/llm_b.py" ] } #"llm_b.py" +target Python { keepalive: true } -import LlmA, LlmB, Judge from "llm_base_class_federate.lf" +import LlmA, LlmB, Judge from "llm_base_class_federate_in.lf" -preamble {= - import threading - import time - from llm_a import agent1 - from llm_b_m2 import agent2 -=} - - -federated reactor llm_game_federated at 10.218.100.95 { - - j = new Judge() - llma = new LlmA() - llmb = new LlmB() +federated reactor 
llm_game_federated at 10.218.100.78 { + j = new Judge() + llma = new LlmA() + llmb = new LlmB() j.ask -> llma.user_in j.ask -> llmb.user_in + llma.answer -> j.llma llmb.answer -> j.llmb llma.ready_out -> j.ready_a llmb.ready_out -> j.ready_b - } - - diff --git a/llm/src/llm_base_class.lf b/llm/src/llm_base_class.lf index 82e374b..0d4a10d 100644 --- a/llm/src/llm_base_class.lf +++ b/llm/src/llm_base_class.lf @@ -1,28 +1,106 @@ -target Python +target Python { keepalive: true } +preamble {= + import threading + import torch + from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig +=} -### Reactor for calling agent 1 reactor LlmA { state th state running = False state out_buffer = "" - input user_in - logical action done - output answer + input user_in + output answer + logical action done + + preamble {= + print("[LlmA] Loading Llama-2-7B chat model", flush=True) + has_cuda = torch.cuda.is_available() + dtype = torch.bfloat16 if has_cuda else torch.float32 + + model_id_a = "meta-llama/Llama-2-7b-chat-hf" + + tokenizer_a = AutoTokenizer.from_pretrained(model_id_a, use_fast=True) + if tokenizer_a.pad_token_id is None: + tokenizer_a.pad_token = tokenizer_a.eos_token + + common_a = dict( + device_map="auto" if has_cuda else None, + torch_dtype=dtype, + low_cpu_mem_usage=True, + ) + + try: + import bitsandbytes as bnb # noqa: F401 + quant_a = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=dtype, + ) + common_a["quantization_config"] = quant_a + except Exception: + quant_a = None + + # Model as class attribute + model_a = AutoModelForCausalLM.from_pretrained(model_id_a, **common_a) + model_a.eval() + + generation_a = dict( + max_new_tokens=24, + do_sample=False, + temperature=0.1, + eos_token_id=tokenizer_a.eos_token_id, + pad_token_id=tokenizer_a.pad_token_id, + ) + + def run_llm_a(self, q: str) -> str: + """Bound method: uses class-level model/tokenizer.""" + cls = type(self) + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + + tok = cls.tokenizer_a + model = cls.model_a + generation_a = cls.generation_a + has_cuda = cls.has_cuda + inputs = tok(prompt, return_tensors="pt") + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + + with torch.no_grad(): + out = model.generate(**inputs, **generation_a) + + prompt_len = inputs["input_ids"].shape[1] + result = tok.decode(out[0][prompt_len:], skip_special_tokens=True) + + # Make it one-line-ish + t = result.strip() + for sep in ["\n", ". 
", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + return t.strip().strip(":").strip() + + print("[LlmA] 7B model ready.", flush=True) + =} reaction(user_in) -> done {= if self.running: return self.running = True query = user_in.value - def agentA(): + + def worker(): try: - self.out_buffer = agent1(query) + self.out_buffer = self.run_llm_a(query) finally: - done.schedule(1) - self.th = threading.Thread(target=agentA, daemon=True) + done.schedule(0) + + self.th = threading.Thread(target=worker, daemon=True) self.th.start() =} @@ -33,38 +111,109 @@ reactor LlmA { } -### Reactor for calling agent 2 reactor LlmB { state th state running = False state out_buffer = "" - input user_in - output answer + input user_in + output answer logical action done - reaction(user_in)->done{= + preamble {= + print("[LlmB] Loading Llama-2-70B chat model...", flush=True) + + has_cuda = torch.cuda.is_available() + dtype = torch.bfloat16 if has_cuda else torch.float32 + + model_id_b = "meta-llama/Llama-2-70b-chat-hf" + + tokenizer_b = AutoTokenizer.from_pretrained(model_id_b, use_fast=True) + if tokenizer_b.pad_token_id is None: + tokenizer_b.pad_token = tokenizer_b.eos_token + + common_b = dict( + device_map="auto" if has_cuda else None, + torch_dtype=dtype, + low_cpu_mem_usage=True, + ) + + try: + import bitsandbytes as bnb # noqa: F401 + quant_b = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=dtype, + ) + common_b["quantization_config"] = quant_b + except Exception: + quant_b = None + + model_b = AutoModelForCausalLM.from_pretrained(model_id_b, **common_b) + model_b.eval() + + generation_b = dict( + max_new_tokens=24, + do_sample=False, + temperature=0.1, + eos_token_id=tokenizer_b.eos_token_id, + pad_token_id=tokenizer_b.pad_token_id, + ) + + def run_llm_b(self, q: str) -> str: + cls = type(self) + prompt = f"You are a concise Q&A assistant.\n\n{q}\n" + + tok = cls.tokenizer_b + model = cls.model_b + generation_b = cls.generation_b + has_cuda = cls.has_cuda + + inputs = tok(prompt, return_tensors="pt") + if has_cuda: + inputs = {k: v.to("cuda") for k, v in inputs.items()} + + with torch.no_grad(): + out = model.generate(**inputs, **generation_b) + + prompt_len = inputs["input_ids"].shape[1] + result = tok.decode(out[0][prompt_len:], skip_special_tokens=True) + + t = result.strip() + for sep in ["\n", ". 
", " "]: + idx = t.find(sep) + if idx > 0: + t = t[:idx] + break + return t.strip().strip(":").strip() + + print("[LlmB] 70B model ready.", flush=True) + =} + + reaction(user_in) -> done {= if self.running: return self.running = True query = user_in.value - def agentB(): + + def worker(): try: - self.out_buffer = agent2(query) + self.out_buffer = self.run_llm_b(query) finally: - done.schedule(1) - self.th = threading.Thread(target=agentB, daemon=True) + done.schedule(0) + + self.th = threading.Thread(target=worker, daemon=True) self.th.start() =} - reaction(done)->answer{= + reaction(done) -> answer {= self.running = False answer.set(self.out_buffer) =} - } -### Reactor for Judge + reactor Judge { state th state terminate = False @@ -73,8 +222,8 @@ reactor Judge { output ask output quit - input llma - input llmb + input llma + input llmb state waiting = False state logical_base_time = 0 @@ -99,6 +248,7 @@ reactor Judge { else: self.buffer = s line.schedule(1) + self.th = threading.Thread(target=reader, daemon=True) self.th.start() =} @@ -113,8 +263,8 @@ reactor Judge { self.logical_base_time = lf.time.logical_elapsed() self.physical_base_time = lf.time.physical_elapsed() timeout.schedule(0) - print(f"\n\n\nQuery: {self.buffer}\n") - print("waiting...\n") + print(f"\n\n\nQuery: {self.buffer}\n", flush=True) + print("waiting...\n", flush=True) ask.set(self.buffer) =} @@ -126,8 +276,8 @@ reactor Judge { physical_now = lf.time.physical_elapsed() logical_ms = int((logical_now - self.logical_base_time) / 1000000) physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms") - print(f"{llma.value}") + print(f" Winner: LLM-A | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + print(f"{llma.value}", flush=True) =} reaction(llmb) {= @@ -138,8 +288,8 @@ reactor Judge { physical_now = lf.time.physical_elapsed() logical_ms = int((logical_now - self.logical_base_time) / 1000000) physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms") - print(f"{llmb.value}") + print(f"Winner: LLM-B | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) + print(f"{llmb.value}", flush=True) =} reaction(timeout) {= @@ -150,7 +300,7 @@ reactor Judge { physical_now = lf.time.physical_elapsed() logical_ms = int((logical_now - self.logical_base_time) / 1000000) physical_ms = int((physical_now - self.physical_base_time) / 1000000) - print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms") + print(f"TIMEOUT (60 s) | logical {logical_ms} ms | physical {physical_ms} ms", flush=True) =} reaction(shutdown) {= @@ -158,4 +308,4 @@ reactor Judge { if self.th and self.th.is_alive(): self.th.join() =} -} +} \ No newline at end of file diff --git a/llm/src/llm_quiz_game.lf b/llm/src/llm_quiz_game.lf index e5ebf20..bbe765c 100644 --- a/llm/src/llm_quiz_game.lf +++ b/llm/src/llm_quiz_game.lf @@ -1,16 +1,9 @@ -target Python { keepalive: true, files: ["agents/llm.py"] } - +target Python { keepalive: true } import LlmA from "llm_base_class.lf" import LlmB from "llm_base_class.lf" import Judge from "llm_base_class.lf" -preamble {= - import threading - import time - from llm import agent1, agent2 -=} - main reactor { llma_response = new LlmA() llmb_response = new LlmB() From d3d416558569faa8287e1f94e5937e843c5516b6 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad Date: Sat, 29 Nov 2025 12:43:19 -0700 Subject: [PATCH 
47/54] commented the code to files --- llm/src/federated/llm_base_class_federate.lf | 25 +++++++++++++++- llm/src/federated/llm_game_federated.lf | 3 +- llm/src/llm_base_class.lf | 31 +++++++++++++++++--- llm/src/llm_quiz_game.lf | 10 +++++++ 4 files changed, 63 insertions(+), 6 deletions(-) diff --git a/llm/src/federated/llm_base_class_federate.lf b/llm/src/federated/llm_base_class_federate.lf index cc0a0ee..a7ae791 100644 --- a/llm/src/federated/llm_base_class_federate.lf +++ b/llm/src/federated/llm_base_class_federate.lf @@ -1,3 +1,26 @@ +/** + * This program implements a simple LF-based quiz between two LLM agents + * and a Judge reactor that measures latency. + * + * LlmA loads a Llama-2-7B chat model. + * LlmB loads a Llama-2-70B chat model. + * Both use optional 4-bit quantization (bitsandbytes) and, run on a CUDA GPU. + * + * Each Llm reactor: + * Initializes its tokenizer and model once in the preamble. + * Spawns a background thread per query to call model.generate(). + * Cleans the decoded text into a short, one-line answer. + * Uses a logical action (done) to notify that the answer is ready and sets its output port. + * + * The Judge reactor: + * Reads user queries. + * Broadcasts each query on ask to both LlmA and LlmB. + * Reads logical and physical timestamps when the question is issued. + * Declares the winner as the LLM that responds first and prints its latency and answer. + * Triggers a 60 s timeout if neither LLM responds, and terminates the program when the user types "quit". + * + * @author Deeksha Prahlad + */ target Python { keepalive: true } preamble {= @@ -318,4 +341,4 @@ reactor Judge { if self.th and self.th.is_alive(): self.th.join() =} -} \ No newline at end of file +} diff --git a/llm/src/federated/llm_game_federated.lf b/llm/src/federated/llm_game_federated.lf index 8461eed..fe4d7da 100644 --- a/llm/src/federated/llm_game_federated.lf +++ b/llm/src/federated/llm_game_federated.lf @@ -1,6 +1,6 @@ target Python { keepalive: true } -import LlmA, LlmB, Judge from "llm_base_class_federate_in.lf" +import LlmA, LlmB, Judge from "llm_base_class_federate.lf" federated reactor llm_game_federated at 10.218.100.78 { j = new Judge() @@ -16,3 +16,4 @@ federated reactor llm_game_federated at 10.218.100.78 { llma.ready_out -> j.ready_a llmb.ready_out -> j.ready_b } + diff --git a/llm/src/llm_base_class.lf b/llm/src/llm_base_class.lf index 0d4a10d..000479d 100644 --- a/llm/src/llm_base_class.lf +++ b/llm/src/llm_base_class.lf @@ -1,3 +1,26 @@ +/** + * This program implements a simple LF-based quiz between two LLM agents + * and a Judge reactor that measures latency. + * + * LlmA loads a Llama-2-7B chat model. + * LlmB loads a Llama-2-70B chat model. + * Both use optional 4-bit quantization (bitsandbytes) and, run on a CUDA GPU. + * + * Each Llm reactor: + * Initializes its tokenizer and model once in the preamble. + * Spawns a background thread per query to call model.generate(). + * Cleans the decoded text into a short, one-line answer. + * Uses a logical action (done) to notify that the answer is ready and sets its output port. + * + * The Judge reactor: + * Reads user queries. + * Broadcasts each query on ask to both LlmA and LlmB. + * Reads logical and physical timestamps when the question is issued. + * Declares the winner as the LLM that responds first and prints its latency and answer. + * Triggers a 60 s timeout if neither LLM responds, and terminates the program when the user types "quit". 
+ * + * @author Deeksha Prahlad + */ target Python { keepalive: true } preamble {= @@ -44,7 +67,7 @@ reactor LlmA { except Exception: quant_a = None - # Model as class attribute + model_a = AutoModelForCausalLM.from_pretrained(model_id_a, **common_a) model_a.eval() @@ -76,7 +99,7 @@ reactor LlmA { prompt_len = inputs["input_ids"].shape[1] result = tok.decode(out[0][prompt_len:], skip_special_tokens=True) - # Make it one-line-ish + t = result.strip() for sep in ["\n", ". ", " "]: idx = t.find(sep) @@ -121,7 +144,7 @@ reactor LlmB { logical action done preamble {= - print("[LlmB] Loading Llama-2-70B chat model...", flush=True) + print("[LlmB] Loading Llama-2-70B chat model", flush=True) has_cuda = torch.cuda.is_available() dtype = torch.bfloat16 if has_cuda else torch.float32 @@ -308,4 +331,4 @@ reactor Judge { if self.th and self.th.is_alive(): self.th.join() =} -} \ No newline at end of file +} diff --git a/llm/src/llm_quiz_game.lf b/llm/src/llm_quiz_game.lf index bbe765c..105aa8f 100644 --- a/llm/src/llm_quiz_game.lf +++ b/llm/src/llm_quiz_game.lf @@ -1,3 +1,12 @@ +/** + * Main LF reactor that connects three independently defined reactors: + * + * LlmA : inference for the Llama-2-7B model. + * LlmB : inference for the Llama-2-70B model. + * Judge: Reads user queries, measures timing, and decides the winner. + * This main reactor composes the LLM reactors and Judge into one coordinated system. + * All timing, concurrency, and winner-determination logic reside in the sub-reactors in "llm_base_class.lf" + */ target Python { keepalive: true } import LlmA from "llm_base_class.lf" @@ -14,3 +23,4 @@ main reactor { llma_response.answer -> j.llma llmb_response.answer -> j.llmb } + From 8bcc2eb593f75b79d7c507fb7dc2e6d2e24a7f5b Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Tue, 2 Dec 2025 13:43:46 -0700 Subject: [PATCH 48/54] Revised hardware and software requirements Updated hardware and software requirements in README. --- llm/README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/llm/README.md b/llm/README.md index c97f213..fa8c4e8 100644 --- a/llm/README.md +++ b/llm/README.md @@ -75,12 +75,13 @@ To ensure optimal performance, the following hardware and software requirements ### Hardware Requirements The demo was tested with the following hardware setup. -- **GPU**: NVIDIA RTX A6000 +- **GPU**: NVIDIA RTX A6000, NVIDIA RTX PRO 6000 Blackwell ### Software Requirements - **OS**: Linux -- **Python** -- **CUDA Version**: 12.8 +- **Python**: 3.12.3+ +- **CUDA Version**: 12.8+ +- **Lingua Franca**: 0.10.1 Make sure the environment is properly configured to use CUDA for optimal GPU acceleration. From 913bcbe0b1e9d9c1c5d83e172fc903bf16e5960e Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Tue, 2 Dec 2025 13:45:15 -0700 Subject: [PATCH 49/54] Revise hardware and software requirements in README for federated execution Updated hardware and software requirements in README. --- llm/src/federated/README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/llm/src/federated/README.md b/llm/src/federated/README.md index ef8970b..5504f2f 100644 --- a/llm/src/federated/README.md +++ b/llm/src/federated/README.md @@ -66,12 +66,13 @@ To ensure optimal performance, the following hardware and software requirements ### Hardware Requirements The demo was tested with the following hardware setup. 
-- **GPU**: NVIDIA RTX A6000 +- **GPU**: NVIDIA RTX A6000, NVIDIA RTX PRO 6000 Blackwell ### Software Requirements - **OS**: Linux -- **Python** -- **CUDA Version**: 12.8 +- **Python**: 3.12.3+ +- **CUDA Version**: 12.8+ +- **Lingua Franca**: 0.10.1 Make sure the environment is properly configured to use CUDA for optimal GPU acceleration. From 97f22f49183fc02671c7797668dcde05424ea50b Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Tue, 2 Dec 2025 13:50:13 -0700 Subject: [PATCH 50/54] Clean up formatting in README.md Removed extra newlines in the README instructions. --- llm/src/federated/README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/llm/src/federated/README.md b/llm/src/federated/README.md index 5504f2f..e63fa0b 100644 --- a/llm/src/federated/README.md +++ b/llm/src/federated/README.md @@ -110,22 +110,18 @@ cd fed-gen/llm_game_federated/ In the first terminal run: ``` ./bin/RTI -n 3 - ``` In the second terminal run: ``` ./bin/federate__j - ``` In the third terminal run: ``` ./bin/federate__llma - ``` In the fourth terminal run: ``` ./bin/federate__llmb - ``` The system will ask for entering the quiz question which is to be obtained from the keyboard input. From 70665a0a9c1446c2fefb9b0c84a0faedb6f40978 Mon Sep 17 00:00:00 2001 From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com> Date: Tue, 2 Dec 2025 14:00:57 -0700 Subject: [PATCH 51/54] Revise README with updated model execution output Updated execution log details and model loading messages. --- llm/README.md | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/llm/README.md b/llm/README.md index fa8c4e8..f67a00d 100644 --- a/llm/README.md +++ b/llm/README.md @@ -117,15 +117,22 @@ Example output printed on the terminal:

 
 --------------------------------------------------
+******* Using Python version: 3.12.3
+[LlmA] Loading Llama-2-7B chat model
+`torch_dtype` is deprecated! Use `dtype` instead!
+Loading checkpoint shards: 100%|| 2/2 [00:09<00:00,  4.61s/it]
+[LlmA] 7B model ready.
+[LlmB] Loading Llama-2-70B chat model
+Loading checkpoint shards: 100%|| 15/15 [01:36<00:00,  6.40s/it]
+[LlmB] 70B model ready.
 ---- System clock resolution: 1 nsec
----- Start execution on Fri Sep 19 10:46:31 2025 ---- plus 772215861 nanoseconds
+---- Start execution on Tue Dec 02 13:57:35 2025 ---- plus 38464851 nanoseconds
 Enter the quiz question
 What is the capital of South Korea?
+Enter the quiz question
 Query: What is the capital of South Korea?
-
 waiting...
-
-Winner: LLM-B | logical 1184 ms | physical 1184 ms
+Winner: LLM-B | logical 0 ms | physical 2521 ms
 Answer: Seoul.
 --------------------------------------------------
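
A note on the latency figures above: the Judge records `lf.time.logical_elapsed()` and `lf.time.physical_elapsed()` when the question is broadcast and again when the first answer arrives, and reports the difference in milliseconds. The standalone sketch below mirrors that arithmetic with plain nanosecond integers (the sample values are made up for illustration); only the physical clock advances while the model generates, which is why the logical latency can read 0 ms here.

```
# Sketch of the Judge's latency bookkeeping (mirrors llm_base_class.lf).
# Inside the LF runtime these values come from lf.time.logical_elapsed()
# and lf.time.physical_elapsed(); here they are plain nanosecond integers
# so the arithmetic can run on its own. The sample values are illustrative.

def elapsed_ms(base_ns: int, now_ns: int) -> int:
    """Convert an elapsed-time difference from nanoseconds to milliseconds."""
    return int((now_ns - base_ns) / 1_000_000)

# Recorded when the question is broadcast on `ask`:
logical_base_ns, physical_base_ns = 500_000_000, 500_000_000
# Recorded when the first answer arrives:
logical_now_ns, physical_now_ns = 500_000_000, 3_021_000_000

print(f"Winner: LLM-B | logical {elapsed_ms(logical_base_ns, logical_now_ns)} ms"
      f" | physical {elapsed_ms(physical_base_ns, physical_now_ns)} ms")
```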
 

From e34db72a2098fc152ffa278035b24212314df9b5 Mon Sep 17 00:00:00 2001
From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com>
Date: Tue, 2 Dec 2025 14:01:33 -0700
Subject: [PATCH 52/54] Update README for `torch_dtype` deprecation

Removed the `torch_dtype` deprecation warning line from the README's example output.
---
 llm/README.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/llm/README.md b/llm/README.md
index f67a00d..ceef144 100644
--- a/llm/README.md
+++ b/llm/README.md
@@ -119,7 +119,6 @@ Example output printed on the terminal:
 --------------------------------------------------
 ******* Using Python version: 3.12.3
 [LlmA] Loading Llama-2-7B chat model
-`torch_dtype` is deprecated! Use `dtype` instead!
 Loading checkpoint shards: 100%|| 2/2 [00:09<00:00,  4.61s/it]
 [LlmA] 7B model ready.
 [LlmB] Loading Llama-2-70B chat model

From d0542f11c6e700c44d268036ab230f08cbfae6db Mon Sep 17 00:00:00 2001
From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com>
Date: Tue, 2 Dec 2025 14:35:23 -0700
Subject: [PATCH 53/54] Revise example output in README for quiz functionality

Updated example output in README to reflect new quiz question and answers.
---
 llm/src/federated/README.md | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/llm/src/federated/README.md b/llm/src/federated/README.md
index e63fa0b..23cc689 100644
--- a/llm/src/federated/README.md
+++ b/llm/src/federated/README.md
@@ -126,21 +126,28 @@ In the fourth terminal run:
 
 The system prompts for the quiz question, which is read from keyboard input.
 
-Example output printed on the terminal:
+Example output printed on the terminal where federate__j is running:
  
 
 
 --------------------------------------------------
+******* Using Python version: 3.12.3
 ---- System clock resolution: 1 nsec
----- Start execution on Fri Sep 19 10:46:31 2025 ---- plus 772215861 nanoseconds
-Enter the quiz question
-What is the capital of South Korea?
-Query: What is the capital of South Korea?
+---- Start execution on Tue Dec 02 14:31:36 2025 ---- plus 537640559 nanoseconds
+Fed 0 (j): Connected to 10.218.100.78:15045.
+Fed 0 (j): Starting timestamp is: 1764711104560384525.
+[Judge] Waiting for models
+[Judge] Ready
+Enter the quiz question (or 'quit')
+What is the opposite of tall?
+Enter the quiz question (or 'quit')
+
+Query: What is the opposite of tall?
 
 waiting...
 
-Winner: LLM-B | logical 1184 ms | physical 1184 ms
-Answer: Seoul.
+Winner: LLM-A | logical 0 ms | physical 378 ms
+A: The opposite of tall is short.
 --------------------------------------------------
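
The winner line above comes from a simple first-response-wins rule in the Judge: a `waiting` flag is set when the question goes out and cleared by whichever answer (or the 60 s timeout) arrives first, so the slower model's reaction returns without printing. A standalone sketch of that logic (plain Python; the class name, second answer, and timing values are illustrative, while the real version lives in the Judge reactions of `llm_base_class_federate.lf`):

```
# Illustrative, standalone model of the Judge's first-response-wins logic.
# In the LF program each arrival is a separate reaction triggered by the
# llma/llmb input ports; here arrivals are plain method calls.

class JudgeSketch:
    def __init__(self):
        self.waiting = False   # True only between asking and the first answer

    def ask(self, question):
        self.waiting = True
        print(f"Query: {question}\n\nwaiting...")

    def on_answer(self, who, answer, physical_ms):
        if not self.waiting:   # a faster answer (or the timeout) already won
            return
        self.waiting = False
        print(f"Winner: {who} | physical {physical_ms} ms")
        print(answer)

judge = JudgeSketch()
judge.ask("What is the opposite of tall?")
judge.on_answer("LLM-A", "A: The opposite of tall is short.", 378)
judge.on_answer("LLM-B", "B: Short.", 901)   # ignored: LLM-A answered first
```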
 
 
From 947c9beee3d2b646ad52ba1637b4fd1bc6d12e2e Mon Sep 17 00:00:00 2001
From: Deeksha Prahlad <112724341+Deeksha-20-99@users.noreply.github.com>
Date: Tue, 2 Dec 2025 15:22:32 -0700
Subject: [PATCH 54/54] Remove stray llm/=** entry from .gitignore

---
 .gitignore | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index eed972c..24254e6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,4 +3,3 @@ llm/src-gen/
 llm/include/
 llm/bin
 **__pycache__**
-llm/=**
\ No newline at end of file