From 20a77a58389afd90eced33cf834ad95c925a009b Mon Sep 17 00:00:00 2001
From: Tanner Collin
Date: Thu, 26 Sep 2024 17:50:03 +0000
Subject: [PATCH] Handle llama errors

---
 main.py | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/main.py b/main.py
index 3995f2a..6eff28f 100644
--- a/main.py
+++ b/main.py
@@ -13,9 +13,15 @@ NOTES_DIR = '/home/tanner/notes-git/notes'
 
 def llama(prompt):
     data = dict(model='llama3.1', prompt=prompt, stream=False)
-    r = requests.post(LLAMA_URL, json=data, timeout=10)
-    r = r.json()
-    return r['response']
+    try:
+        r = requests.post(LLAMA_URL, json=data, timeout=10)
+        r.raise_for_status()
+        r = r.json()
+        return r['response']
+    except BaseException as e:
+        logging.error('Problem with llama: {} - {}'.format(e.__class__.__name__, str(e)))
+        return False
+
 
 def git_diff():
     result = subprocess.run(['git', 'diff', '--cached', '-U0'], stdout=subprocess.PIPE, cwd=NOTES_DIR)
@@ -49,6 +55,10 @@ Write in imperitive tense.
 
     message = llama(prompt)
 
+    if not message:
+        logging.info(' Failed to generate')
+        return False
+
     logging.info('Generated message: %s', message)
 
     if '.md' in message.lower():