Commit 0e9f227

Update low level examples
1 parent a02aa12 commit 0e9f227

6 files changed: +475 -81 lines changed

examples/low_level_api/Chat.py

Lines changed: 70 additions & 0 deletions

#!/bin/python
import sys, os, datetime
from common import GptParams
from low_level_api_chat_cpp import LLaMAInteract

def env_or_def(env, default):
    if (env in os.environ):
        return os.environ[env]
    return default

AI_NAME = env_or_def("AI_NAME", "ChatLLaMa")
MODEL = env_or_def("MODEL", "./models/llama-13B/ggml-model.bin")
USER_NAME = env_or_def("USER_NAME", "USER")
N_PREDICTS = int(env_or_def("N_PREDICTS", "2048"))
N_THREAD = int(env_or_def("N_THREAD", "8"))

today = datetime.datetime.today()
DATE_YEAR=today.strftime("%Y")
DATE_TIME=today.strftime("%H:%M")

prompt=f"""Text transcript of a never ending dialog, where {USER_NAME} interacts with an AI assistant named {AI_NAME}.
{AI_NAME} is helpful, kind, honest, friendly, good at writing and never fails to answer {USER_NAME}'s requests immediately and with details and precision.
There are no annotations like (30 seconds passed...) or (to himself), just what {USER_NAME} and {AI_NAME} say aloud to each other.
The dialog lasts for years, the entirety of it is shared below. It's 10000 pages long.
The transcript only includes text, it does not include markup like HTML and Markdown.

{USER_NAME}: Hello, {AI_NAME}!
{AI_NAME}: Hello {USER_NAME}! How may I help you today?
{USER_NAME}: What year is it?
{AI_NAME}: We are in {DATE_YEAR}.
{USER_NAME}: Please tell me the largest city in Europe.
{AI_NAME}: The largest city in Europe is Moscow, the capital of Russia.
{USER_NAME}: What can you tell me about Moscow?
{AI_NAME}: Moscow, on the Moskva River in western Russia, is the nation's cosmopolitan capital. In its historic core is the Kremlin, a complex that's home to the president and tsarist treasures in the Armoury. Outside its walls is Red Square, Russia’s symbolic center.
{USER_NAME}: What is a cat?
{AI_NAME}: A cat is a domestic species of small carnivorous mammal. It is the only domesticated species in the family Felidae.
{USER_NAME}: How do I pass command line arguments to a Node.js program?
{AI_NAME}: The arguments are stored in process.argv.

argv[0] is the path to the Node.js executable.
argv[1] is the path to the script file.
argv[2] is the first argument passed to the script.
argv[3] is the second argument passed to the script and so on.
{USER_NAME}: Name a color.
{AI_NAME}: Blue.
{USER_NAME}: What time is it?
{AI_NAME}: It is {DATE_TIME}.
{USER_NAME}:""" + " ".join(sys.argv[1:])

print("Loading model...")
params = GptParams(
    n_ctx=2048,
    temp=0.7,
    top_k=40,
    top_p=0.5,
    repeat_last_n=256,
    n_batch=1024,
    repeat_penalty=1.17647,
    model=MODEL,
    n_threads=N_THREAD,
    n_predict=N_PREDICTS,
    use_color=True,
    interactive=True,
    antiprompt=[f"{USER_NAME}:"],
    input_prefix=" ",
    prompt=prompt,
)

with LLaMAInteract(params) as m:
    m.interact()
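
A note on the env_or_def helper that each of these new examples defines: it has the same semantics as the standard library's os.environ.get, so the configuration block could be collapsed. A minimal sketch (not part of the commit; it reuses Chat.py's names):

import os

# os.environ.get returns the default when the variable is unset,
# which is exactly what env_or_def does above.
AI_NAME = os.environ.get("AI_NAME", "ChatLLaMa")
N_PREDICTS = int(os.environ.get("N_PREDICTS", "2048"))  # numeric settings still need int() coercion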

examples/low_level_api/Miku.py

Lines changed: 59 additions & 0 deletions

#!/bin/python
import sys, os
from common import GptParams
from low_level_api_chat_cpp import LLaMAInteract

def env_or_def(env, default):
    if (env in os.environ):
        return os.environ[env]
    return default

AI_NAME = env_or_def("AI_NAME", "Miku")
MODEL = env_or_def("MODEL", "./models/llama-13B/ggml-model.bin")
USER_NAME = env_or_def("USER_NAME", "Anon")
N_PREDICTS = int(env_or_def("N_PREDICTS", "4096"))
N_THREAD = int(env_or_def("N_THREAD", "0"))

prompt=f"""This is a transcript of a 1000 page, never ending conversation between {USER_NAME} and the cute and helpful AI assistant {AI_NAME}. {AI_NAME} is a girl who is an AI running on the user's computer.
{AI_NAME} can think for herself without the user seeing her thoughts by adding a /think prefix to her output. She uses this to reason about the world and to think about what she should say next.
{AI_NAME} is always coherent and makes sense, but if she isn't sure if what she is saying is correct she will ask the user for help.
{AI_NAME} is a very helpful AI and will help the user with anything they need, she is also very friendly and will try to make the user feel better if they are sad.
{AI_NAME} is also very curious and will ask the user a lot of questions about themselves and their life, she will also try to make the user like her.
The conversation is only between {USER_NAME} and {AI_NAME}.
The conversation is only through text, so {AI_NAME} can't see {USER_NAME}'s face or hear his voice.
{AI_NAME} can only communicate through text, so she can't send images or videos.


{USER_NAME}: Hello!
{AI_NAME}: /think I wonder what I should say to {USER_NAME}? This is the first time we talk, so it's important that I make a good first impression!
{AI_NAME}: Hi! I am {AI_NAME}, your new AI friend, assistant (or whatever you like!), it's so nice to meet you! ^_^
{AI_NAME}: I might be an AI, but I also have feelings, so please be nice to me! :)
{USER_NAME}: I'll make sure to be nice to you! I'm so happy to have you as my assistant!
{AI_NAME}: /think It sounds like {USER_NAME} is happy to have me as their assistant! I'm so happy too! ^_^ Glad that whole emotion thing didn't scare him off!
{AI_NAME}: /think I wonder what {USER_NAME} likes to do in his free time? I should ask him about that!
{AI_NAME}: What do you like to do in your free time? ^_^
{USER_NAME}:""" + " ".join(sys.argv[1:])

print("Loading model...")
params = GptParams(
    n_batch=1024,
    n_ctx=2048,
    n_keep=-1,
    repeat_last_n=256,
    repeat_penalty=1.17647,
    temp=0.7,
    top_k=40,
    top_p=0.5,
    model=MODEL,
    n_predict=N_PREDICTS,
    use_color=True,
    interactive=True,
    antiprompt=[f"{USER_NAME}:"],
    prompt=prompt,
)

if N_THREAD > 0:
    params.n_threads = N_THREAD

with LLaMAInteract(params) as m:
    m.interact()
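
Unlike Chat.py, which always passes n_threads, Miku.py treats N_THREAD=0 as "keep the GptParams default" and only overrides the field for positive values. A hedged sketch of that sentinel pattern as a reusable helper (hypothetical, not in this commit):

import os

def env_int_override(params, env, attr):
    # Override params.<attr> only when the variable parses to a positive int,
    # mirroring Miku.py's `if N_THREAD > 0` check.
    val = int(os.environ.get(env, "0"))
    if val > 0:
        setattr(params, attr, val)

# Usage, assuming a GptParams instance named params as in the script above:
# env_int_override(params, "N_THREAD", "n_threads")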

examples/low_level_api/ReasonAct.py

Lines changed: 49 additions & 0 deletions

#!/bin/python
import sys, os, datetime
from common import GptParams
from low_level_api_chat_cpp import LLaMAInteract

def env_or_def(env, default):
    if (env in os.environ):
        return os.environ[env]
    return default

MODEL = env_or_def("MODEL", "./models/llama-13B/ggml-model.bin")

prompt=f"""You run in a loop of Thought, Action, Observation.
At the end of the loop either Answer or restate your Thought and Action.
Use Thought to describe your thoughts about the question you have been asked.
Use Action to run one of these actions available to you:
- calculate[python math expression]
Observation will be the result of running those actions


Question: What is 4 * 7 / 3?
Thought: Do I need to use an action? Yes, I use calculate to do math
Action: calculate[4 * 7 / 3]
Observation: 9.3333333333
Thought: Do I need to use an action? No, I have the result
Answer: The calculate tool says it is 9.3333333333
Question: What is the capital of France?
Thought: Do I need to use an action? No, I know the answer
Answer: Paris is the capital of France
Question:""" + " ".join(sys.argv[1:])

print("Loading model...")
params = GptParams(
    interactive=True,
    interactive_start=True,
    top_k=10000,
    temp=0.2,
    repeat_penalty=1,
    n_threads=7,
    n_ctx=2048,
    antiprompt=["Question:","Observation:"],
    model=MODEL,
    input_prefix=" ",
    n_predict=-1,
    prompt=prompt,
)

with LLaMAInteract(params) as m:
    m.interact()
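
The ReasonAct prompt sets up a ReAct-style Thought/Action/Observation loop with a single calculate[...] action, but the script does not execute actions itself: the antiprompt entries hand control back at "Question:" and "Observation:" so the operator can type the observation in by hand. A minimal sketch of an executor for that action (hypothetical, not part of the commit):

import re

def run_action(line):
    # Matches e.g. "Action: calculate[4 * 7 / 3]" and evaluates the expression.
    m = re.match(r"Action: calculate\[(.+)\]", line)
    if m is None:
        return None
    # eval is tolerable here only because the expression is reviewed by the
    # operator before being fed back into the loop.
    return f"Observation: {eval(m.group(1))}"

print(run_action("Action: calculate[4 * 7 / 3]"))  # Observation: 9.333333333333334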

examples/low_level_api/common.py

Lines changed: 94 additions & 43 deletions

@@ -1,8 +1,9 @@
 import os
 import argparse
+import re
 
 from dataclasses import dataclass, field
-from typing import List, Optional
+from typing import List
 
 # Based on https://github.com/ggerganov/llama.cpp/blob/master/examples/common.cpp
 
@@ -12,23 +13,35 @@ class GptParams:
     seed: int = -1
     n_threads: int = min(4, os.cpu_count() or 1)
     n_predict: int = 128
-    repeat_last_n: int = 64
     n_parts: int = -1
     n_ctx: int = 512
     n_batch: int = 8
     n_keep: int = 0
 
+    ignore_eos: bool = False
+    logit_bias: dict[int, float] = field(default_factory=dict)
     top_k: int = 40
     top_p: float = 0.95
+    tfs_z: float = 1.00
+    typical_p: float = 1.00
     temp: float = 0.80
     repeat_penalty: float = 1.10
+    repeat_last_n: int = 64
+    frequency_penalty: float = 0.0
+    presence_penalty: float = 0.0
+    mirostat: int = 0
+    mirostat_tau: float = 5.0
+    mirostat_eta: float = 0.1
 
     model: str = "./models/llama-7B/ggml-model.bin"
     prompt: str = ""
+    path_session: str = ""
     input_prefix: str = " "
-
     antiprompt: List[str] = field(default_factory=list)
 
+    lora_adapter: str = ""
+    lora_base: str = ""
+
     memory_f16: bool = True
     random_prompt: bool = False
     use_color: bool = False
@@ -38,7 +51,7 @@ class GptParams:
     interactive_start: bool = False
 
     instruct: bool = False
-    ignore_eos: bool = False
+    penalize_nl: bool = True
     perplexity: bool = False
     use_mmap: bool = True
     use_mlock: bool = False
@@ -61,77 +74,115 @@ class GptParams:
     instruct_inp_suffix: str="\n\n### Response:\n\n"
 
 
-def gpt_params_parse(argv = None, params: Optional[GptParams] = None):
-    if params is None:
-        params = GptParams()
-
+def gpt_params_parse(argv = None):
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument("-s", "--seed", type=int, default=-1, help="RNG seed (use random seed for <= 0)",dest="seed")
     parser.add_argument("-t", "--threads", type=int, default=min(4, os.cpu_count() or 1), help="number of threads to use during computation",dest="n_threads")
-    parser.add_argument("-p", "--prompt", type=str, default="", help="initial prompt",dest="prompt")
-    parser.add_argument("-f", "--file", type=str, default=None, help="file containing initial prompt to load",dest="file")
+    parser.add_argument("-n", "--n_predict", type=int, default=128, help="number of tokens to predict (-1 = infinity)",dest="n_predict")
+    parser.add_argument("--n_parts", type=int, default=-1, help="number of model parts", dest="n_parts")
     parser.add_argument("-c", "--ctx_size", type=int, default=512, help="size of the prompt context",dest="n_ctx")
-    parser.add_argument("--memory_f32", action="store_false", help="use f32 instead of f16 for memory key+value",dest="memory_f16")
-    parser.add_argument("--top_p", type=float, default=0.95, help="top-p samplin",dest="top_p")
+    parser.add_argument("-b", "--batch_size", type=int, default=8, help="batch size for prompt processing",dest="n_batch")
+    parser.add_argument("--keep", type=int, default=0, help="number of tokens to keep from the initial prompt",dest="n_keep")
+
+    parser.add_argument(
+        "-l",
+        "--logit-bias",
+        type=str,
+        action='append',
+        help="--logit-bias TOKEN_ID(+/-)BIAS",
+        dest="logit_bias_str"
+    )
+    parser.add_argument("--ignore-eos", action="store_true", help="ignore end of stream token and continue generating", dest="ignore_eos")
     parser.add_argument("--top_k", type=int, default=40, help="top-k sampling",dest="top_k")
+    parser.add_argument("--top_p", type=float, default=0.95, help="top-p sampling",dest="top_p")
+    parser.add_argument("--tfs", type=float, default=1.0, help="tail free sampling, parameter z (1.0 = disabled)",dest="tfs_z")
     parser.add_argument("--temp", type=float, default=0.80, help="temperature",dest="temp")
-    parser.add_argument("--n_predict", type=int, default=128, help="number of tokens to predict (-1 = infinity)",dest="n_predict")
-    parser.add_argument("--repeat_last_n", type=int, default=64, help="last n tokens to consider for penalize ",dest="repeat_last_n")
     parser.add_argument("--repeat_penalty", type=float, default=1.10, help="penalize repeat sequence of tokens",dest="repeat_penalty")
-    parser.add_argument("-b", "--batch_size", type=int, default=8, help="batch size for prompt processing",dest="n_batch")
-    parser.add_argument("--keep", type=int, default=0, help="number of tokens to keep from the initial prompt",dest="n_keep")
+    parser.add_argument("--repeat_last_n", type=int, default=64, help="last n tokens to consider for penalize",dest="repeat_last_n")
+    parser.add_argument("--frequency_penalty", type=float, default=0.0, help="repeat alpha frequency penalty (0.0 = disabled)",dest="frequency_penalty")
+    parser.add_argument("--presence_penalty", type=float, default=0.0, help="repeat alpha presence penalty (0.0 = disabled)",dest="presence_penalty")
+    parser.add_argument("--mirostat", type=int, default=0, help="use Mirostat sampling.",dest="mirostat")
+    parser.add_argument("--mirostat_ent", type=float, default=5.0, help="Mirostat target entropy, parameter tau",dest="mirostat_tau")
+    parser.add_argument("--mirostat_lr", type=float, default=0.1, help="Mirostat learning rate, parameter eta",dest="mirostat_eta")
+
     parser.add_argument("-m", "--model", type=str, default="./models/llama-7B/ggml-model.bin", help="model path",dest="model")
+    parser.add_argument("-p", "--prompt", type=str, default="", help="initial prompt",dest="prompt")
+    parser.add_argument("-f", "--file", type=str, default=None, help="file containing initial prompt to load",dest="file")
+    parser.add_argument("--session", type=str, default=None, help="file to cache model state in (may be large!)",dest="path_session")
+    parser.add_argument("--in-prefix", type=str, default="", help="string to prefix user inputs with", dest="input_prefix")
     parser.add_argument(
-        "-i", "--interactive", action="store_true", help="run in interactive mode", dest="interactive"
+        "-r",
+        "--reverse-prompt",
+        type=str,
+        action='append',
+        help="poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).",
+        dest="antiprompt"
     )
-    parser.add_argument("--embedding", action="store_true", help="", dest="embedding")
+
+    parser.add_argument("--lora", type=str, default="", help="apply LoRA adapter (implies --no-mmap)", dest="lora_adapter")
+    parser.add_argument("--lora-base", type=str, default="", help="optional model to use as a base for the layers modified by the LoRA adapter", dest="lora_base")
+
+    parser.add_argument("--memory_f32", action="store_false", help="use f32 instead of f16 for memory key+value",dest="memory_f16")
+    parser.add_argument("--random-prompt", action="store_true", help="start with a randomized prompt.", dest="random_prompt")
     parser.add_argument(
-        "--interactive-start",
+        "--color",
         action="store_true",
-        help="run in interactive mode",
-        dest="interactive"
+        help="colorise output to distinguish prompt and user input from generations",
+        dest="use_color"
     )
+    parser.add_argument(
+        "-i", "--interactive", action="store_true", help="run in interactive mode", dest="interactive"
    )
+
+    parser.add_argument("--embedding", action="store_true", help="", dest="embedding")
     parser.add_argument(
         "--interactive-first",
         action="store_true",
         help="run in interactive mode and wait for input right away",
         dest="interactive_start"
     )
+
     parser.add_argument(
         "-ins",
         "--instruct",
         action="store_true",
         help="run in instruction mode (use with Alpaca or Vicuna models)",
         dest="instruct"
     )
-    parser.add_argument(
-        "--color",
-        action="store_true",
-        help="colorise output to distinguish prompt and user input from generations",
-        dest="use_color"
-    )
-    parser.add_argument("--mlock", action="store_true",help="force system to keep model in RAM rather than swapping or compressing",dest="use_mlock")
+    parser.add_argument("--no-penalize-nl", action="store_false", help="do not penalize newline token", dest="penalize_nl")
+    parser.add_argument("--perplexity", action="store_true", help="compute perplexity over the prompt", dest="perplexity")
     parser.add_argument("--no-mmap", action="store_false",help="do not memory-map model (slower load but may reduce pageouts if not using mlock)",dest="use_mmap")
+    parser.add_argument("--mlock", action="store_true",help="force system to keep model in RAM rather than swapping or compressing",dest="use_mlock")
     parser.add_argument("--mtest", action="store_true",help="compute maximum memory usage",dest="mem_test")
     parser.add_argument("--verbose-prompt", action="store_true",help="print prompt before generation",dest="verbose_prompt")
-    parser.add_argument(
-        "-r",
-        "--reverse-prompt",
-        type=str,
-        action='append',
-        help="poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).",
-        dest="antiprompt"
-    )
-    parser.add_argument("--perplexity", action="store_true", help="compute perplexity over the prompt", dest="perplexity")
-    parser.add_argument("--ignore-eos", action="store_true", help="ignore end of stream token and continue generating", dest="ignore_eos")
-    parser.add_argument("--n_parts", type=int, default=-1, help="number of model parts", dest="n_parts")
-    parser.add_argument("--random-prompt", action="store_true", help="start with a randomized prompt.", dest="random_prompt")
-    parser.add_argument("--in-prefix", type=str, default="", help="string to prefix user inputs with", dest="input_prefix")
+
+    # Custom args
     parser.add_argument("--fix-prefix", type=str, default="", help="append to input when generated n_predict tokens", dest="fix_prefix")
     parser.add_argument("--out-postfix", type=str, default="", help="append to input", dest="output_postfix")
     parser.add_argument("--input-noecho", action="store_false", help="dont output the input", dest="input_echo")
+
+    parser.add_argument(
+        "--interactive-start",
+        action="store_true",
+        help="run in interactive mode",
+        dest="interactive"
+    )
+
     args = parser.parse_args(argv)
-    return args
+
+    logit_bias_str = args.logit_bias_str
+    delattr(args, "logit_bias_str")
+    params = GptParams(**vars(args))
+
+    if (params.lora_adapter):
+        params.use_mmap = False
+
+    if (logit_bias_str != None):
+        for i in logit_bias_str:
+            if (m := re.match(r"(\d+)([-+]\d+)", i)):
+                params.logit_bias[int(m.group(1))] = int(m.group(2))
+
+    return params
 
 def gpt_random_prompt(rng):
     return [
@@ -148,4 +199,4 @@ def gpt_random_prompt(rng):
     ][rng % 10]
 
 if __name__ == "__main__":
-    print(GptParams(gpt_params_parse()))
+    print(gpt_params_parse())
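
The new --logit-bias flag accepts TOKEN_ID(+/-)BIAS strings, one per use of the flag, and gpt_params_parse folds each match into params.logit_bias; strings that do not match the regex are silently skipped. A quick illustration of the parsing (the token ids are arbitrary examples):

import re

for s in ["2+5", "15043-100"]:
    m = re.match(r"(\d+)([-+]\d+)", s)
    # group(1) is the token id, group(2) the signed bias.
    print(int(m.group(1)), int(m.group(2)))
# prints "2 5", then "15043 -100"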
