From 41a44068d0178648208b7dc25ea00fdd77c8e43c Mon Sep 17 00:00:00 2001
From: Brett Kuprel
Date: Thu, 30 Jun 2022 09:36:32 -0400
Subject: [PATCH] keep params in expendable mode

---
 README.md                    |  2 +-
 min_dalle/min_dalle_flax.py  | 10 +++-------
 min_dalle/min_dalle_torch.py |  6 ++----
 3 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index 46724cc..a96edbd 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 
 This is a minimal implementation of Boris Dayma's [DALL·E Mini](https://github.com/borisdayma/dalle-mini). It has been stripped to the bare essentials necessary for doing inference, and converted to PyTorch. To run the torch model, the only third party dependencies are numpy and torch. Flax is used to convert the weights (which can be saved with `torch.save` once the model is loaded), and wandb is only used to download the models.
 
-It currently takes **7.3 seconds** to generate an avocado armchair with DALL·E Mega in PyTorch on Colab
+It currently takes **7.3 seconds** to generate an avocado armchair with DALL·E Mega in PyTorch on Colab (with nonexpendable model in high RAM runtime)
 
 ### Setup
 
diff --git a/min_dalle/min_dalle_flax.py b/min_dalle/min_dalle_flax.py
index b60b538..8886f90 100644
--- a/min_dalle/min_dalle_flax.py
+++ b/min_dalle/min_dalle_flax.py
@@ -28,7 +28,7 @@ class MinDalleFlax(MinDalleBase):
             text_token_count = self.config['max_text_length'],
             text_vocab_count = self.config['encoder_vocab_size'],
             layer_count = self.config['encoder_layers']
-        ).bind({'params': self.model_params.pop('encoder')})
+        ).bind({'params': self.model_params['encoder']})
 
 
     def init_decoder(self):
@@ -53,17 +53,13 @@ class MinDalleFlax(MinDalleBase):
         encoder_state = self.encoder(text_tokens)
         if self.is_expendable: del self.encoder
 
-        if self.is_expendable:
-            self.init_decoder()
-            params = self.model_params.pop('decoder')
-        else:
-            params = self.model_params['decoder']
+        if self.is_expendable: self.init_decoder()
+
         print("sampling image tokens")
         image_tokens = self.decoder.sample_image_tokens(
             text_tokens,
             encoder_state,
             jax.random.PRNGKey(seed),
-            params
+            self.model_params['decoder']
         )
         if self.is_expendable: del self.decoder
 
diff --git a/min_dalle/min_dalle_torch.py b/min_dalle/min_dalle_torch.py
index bdfa662..0efffdf 100644
--- a/min_dalle/min_dalle_torch.py
+++ b/min_dalle/min_dalle_torch.py
@@ -40,13 +40,12 @@ class MinDalleTorch(MinDalleBase):
             glu_embed_count = self.config['encoder_ffn_dim']
         )
         params = convert_dalle_bart_torch_from_flax_params(
-            self.model_params.pop('encoder'),
+            self.model_params['encoder'],
             layer_count=self.config['encoder_layers'],
             is_encoder=True
         )
         self.encoder.load_state_dict(params, strict=False)
         if torch.cuda.is_available(): self.encoder = self.encoder.cuda()
-        del params
 
 
     def init_decoder(self):
@@ -64,13 +63,12 @@ class MinDalleTorch(MinDalleBase):
             is_verbose = True
         )
         params = convert_dalle_bart_torch_from_flax_params(
-            self.model_params.pop('decoder'),
+            self.model_params['decoder'],
             layer_count=self.config['decoder_layers'],
             is_encoder=False
         )
         self.decoder.load_state_dict(params, strict=False)
         if torch.cuda.is_available(): self.decoder = self.decoder.cuda()
-        del params
 
 
     def init_detokenizer(self):