Don't keep model in memory, breaks Stable Diffusion
parent dcd8af2895
commit 641eadb253

main.py: 2 changed lines (+1 −1)
@@ -23,7 +23,7 @@ def controller_message(message):
     return False
 
 def llama(prompt):
-    data = dict(model='llama3.1', prompt=prompt, stream=False, keep_alive=-1)
+    data = dict(model='llama3.1', prompt=prompt, stream=False, keep_alive=25)  # don't keep in memory, interferes with Stable Diffusion
     try:
         r = requests.post(LLAMA_URL, json=data, timeout=20)
         r.raise_for_status()
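
Context for the change: in Ollama's API, keep_alive=-1 keeps the model resident in memory indefinitely after a request, while a positive number unloads it after that many seconds, which frees VRAM for Stable Diffusion. Below is a minimal sketch of how the patched helper might read in full; the value of LLAMA_URL and the response handling after raise_for_status() are not shown in this hunk, so those parts are assumptions.

import requests

LLAMA_URL = 'http://localhost:11434/api/generate'  # assumed: default local Ollama endpoint

def llama(prompt):
    # keep_alive=25 lets Ollama unload llama3.1 about 25 s after the request,
    # instead of holding it in VRAM indefinitely (keep_alive=-1), which was
    # starving Stable Diffusion of GPU memory.
    data = dict(model='llama3.1', prompt=prompt, stream=False, keep_alive=25)
    try:
        r = requests.post(LLAMA_URL, json=data, timeout=20)
        r.raise_for_status()
        # Non-streaming /api/generate returns the text in the 'response' field;
        # the project's real return/error handling is outside this hunk.
        return r.json().get('response', '')
    except requests.RequestException:
        return None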