From 7d549505da2541811fd0c73648f7aaefb393a660 Mon Sep 17 00:00:00 2001
From: w4ffl35 <25737761+w4ffl35@users.noreply.github.com>
Date: Mon, 4 Jul 2022 00:26:17 -0600
Subject: [PATCH] Fixes #16 - mega model running out of memory

Prior to this fix, the mega model would (more often than not) fail with an
out-of-memory error when run in succession. Clearing the CUDA cache after
freeing the no-longer-needed parameters and decoder seems to fix the issue.
---
 min_dalle/min_dalle.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/min_dalle/min_dalle.py b/min_dalle/min_dalle.py
index c47ad39..9f12858 100644
--- a/min_dalle/min_dalle.py
+++ b/min_dalle/min_dalle.py
@@ -142,6 +142,7 @@ class MinDalle:
         params = torch.load(self.detoker_params_path)
         self.detokenizer.load_state_dict(params)
         del params
+        torch.cuda.empty_cache()
         if torch.cuda.is_available():
             self.detokenizer = self.detokenizer.cuda()
@@ -175,6 +176,7 @@ class MinDalle:
             encoder_state
         )
         if not self.is_reusable: del self.decoder
+        torch.cuda.empty_cache()
         return image_tokens
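
Note for reviewers: the sketch below is a minimal, self-contained illustration
of the del + torch.cuda.empty_cache() pattern this patch adds. The Linear layer
and variable name are made up for the example and do not appear in min_dalle.

    import torch

    # Stand-in for a large sub-model (e.g. the detokenizer or decoder above).
    model = torch.nn.Linear(8192, 8192)
    if torch.cuda.is_available():
        model = model.cuda()

    # ... run inference with the model ...

    # Drop the last Python reference, then ask PyTorch to release its
    # unoccupied cached blocks back to the CUDA driver. empty_cache() is
    # effectively a no-op when CUDA has not been initialized, so calling it
    # unconditionally (as the patch does) is safe.
    del model
    torch.cuda.empty_cache()

Releasing the cached blocks matters here because the next large model load
needs memory that would otherwise remain reserved by the caching allocator.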