keep params in expendable mode
commit 41a44068d0
parent df9aa6f915
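
In outline: everywhere the code read weights out of `self.model_params`, `dict.pop` is replaced by plain indexing, so reading the weights no longer destroys the entry (previously, a second generate pass in expendable mode would hit a missing key). A plain-Python illustration of the difference, with placeholder values:

    params = {'encoder': 'enc_weights', 'decoder': 'dec_weights'}

    taken = params.pop('encoder')  # removes the key: a second pop('encoder')
                                   # raises KeyError, so each entry worked once
    kept = params['decoder']       # indexing leaves the dict intact, so the
                                   # modules can be rebuilt from it on any call
    assert 'decoder' in params and 'encoder' not in params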

--- README.md
+++ README.md
@@ -5,7 +5,7 @@
 This is a minimal implementation of Boris Dayma's [DALL·E Mini](https://github.com/borisdayma/dalle-mini). It has been stripped to the bare essentials necessary for doing inference, and converted to PyTorch. To run the torch model, the only third party dependencies are numpy and torch. Flax is used to convert the weights (which can be saved with `torch.save` once the model is loaded), and wandb is only used to download the models.
 
-It currently takes **7.3 seconds** to generate an avocado armchair with DALL·E Mega in PyTorch on Colab
+It currently takes **7.3 seconds** to generate an avocado armchair with DALL·E Mega in PyTorch on Colab (with nonexpendable model in high RAM runtime)
 
 ### Setup
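
The context line above notes that the converted weights can be cached with `torch.save` once the model is loaded. A minimal sketch of that flow; the `MinDalleTorch` constructor arguments are not shown in this diff, so the call below is an assumption:

    import torch

    model = MinDalleTorch()  # hypothetical construction; exact args not shown here
    # persist the converted state dicts so later runs can torch.load them
    # instead of re-running the Flax conversion
    torch.save(model.encoder.state_dict(), 'encoder.pt')
    torch.save(model.decoder.state_dict(), 'decoder.pt')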

@@ -28,7 +28,7 @@ class MinDalleFlax(MinDalleBase):
             text_token_count = self.config['max_text_length'],
             text_vocab_count = self.config['encoder_vocab_size'],
             layer_count = self.config['encoder_layers']
-        ).bind({'params': self.model_params.pop('encoder')})
+        ).bind({'params': self.model_params['encoder']})
 
 
     def init_decoder(self):
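
For readers unfamiliar with Flax, `.bind({'params': ...})` attaches a variable collection to a module so it can be called like a plain object. A toy sketch of the same pattern; `ToyEncoder` is a stand-in, not the repo's encoder:

    import jax
    import jax.numpy as jnp
    import flax.linen as nn

    class ToyEncoder(nn.Module):  # hypothetical stand-in module
        @nn.compact
        def __call__(self, x):
            return nn.Dense(4)(x)

    module = ToyEncoder()
    variables = module.init(jax.random.PRNGKey(0), jnp.ones((1, 4)))
    bound = module.bind({'params': variables['params']})  # as in the diff above
    out = bound(jnp.ones((1, 4)))  # callable without passing params to apply()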

@@ -53,17 +53,13 @@ class MinDalleFlax(MinDalleBase):
         encoder_state = self.encoder(text_tokens)
         if self.is_expendable: del self.encoder
 
-        if self.is_expendable:
-            self.init_decoder()
-            params = self.model_params.pop('decoder')
-        else:
-            params = self.model_params['decoder']
+        if self.is_expendable: self.init_decoder()
         print("sampling image tokens")
         image_tokens = self.decoder.sample_image_tokens(
             text_tokens,
             encoder_state,
             jax.random.PRNGKey(seed),
-            params
+            self.model_params['decoder']
         )
         if self.is_expendable: del self.decoder
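
After this hunk, expendable mode still deletes the decoder module after sampling, but its params stay in `self.model_params`, so the decoder can be rebuilt on the next call. A condensed, runnable toy of that lifecycle; every name here is a hypothetical stand-in:

    class ToyDecoder:
        def __init__(self, params):
            self.params = params
        def sample_image_tokens(self, text_tokens):
            return [t + 1 for t in text_tokens]

    class ExpendableModel:
        def __init__(self, model_params):
            self.model_params = model_params  # retained across calls
            self.is_expendable = True

        def init_decoder(self):
            # rebuild the module from the retained params on each call
            self.decoder = ToyDecoder(self.model_params['decoder'])

        def generate(self, text_tokens):
            if self.is_expendable: self.init_decoder()
            tokens = self.decoder.sample_image_tokens(text_tokens)
            if self.is_expendable: del self.decoder  # free module, keep params
            return tokens

    model = ExpendableModel({'decoder': {}})
    assert model.generate([1, 2]) == model.generate([1, 2])  # repeatable now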

@@ -40,13 +40,12 @@ class MinDalleTorch(MinDalleBase):
             glu_embed_count = self.config['encoder_ffn_dim']
         )
         params = convert_dalle_bart_torch_from_flax_params(
-            self.model_params.pop('encoder'),
+            self.model_params['encoder'],
             layer_count=self.config['encoder_layers'],
             is_encoder=True
         )
         self.encoder.load_state_dict(params, strict=False)
         if torch.cuda.is_available(): self.encoder = self.encoder.cuda()
-        del params
 
 
     def init_decoder(self):

@@ -64,13 +63,12 @@ class MinDalleTorch(MinDalleBase):
             is_verbose = True
         )
         params = convert_dalle_bart_torch_from_flax_params(
-            self.model_params.pop('decoder'),
+            self.model_params['decoder'],
             layer_count=self.config['decoder_layers'],
             is_encoder=False
         )
         self.decoder.load_state_dict(params, strict=False)
         if torch.cuda.is_available(): self.decoder = self.decoder.cuda()
-        del params
 
 
     def init_detokenizer(self):
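
The torch path converts the Flax params into a torch state dict and loads it with `strict=False`. A self-contained toy of the same convert-then-load flow; `convert_from_flax` is a hypothetical stand-in for `convert_dalle_bart_torch_from_flax_params`:

    import torch

    class ToyDecoder(torch.nn.Module):  # hypothetical stand-in module
        def __init__(self):
            super().__init__()
            self.proj = torch.nn.Linear(4, 4)

    def convert_from_flax(flax_params):
        # rename/transpose Flax entries into torch state-dict keys
        return {'proj.weight': torch.as_tensor(flax_params['kernel']).T,
                'proj.bias': torch.as_tensor(flax_params['bias'])}

    flax_params = {'kernel': [[0.0] * 4] * 4, 'bias': [0.0] * 4}
    decoder = ToyDecoder()
    params = convert_from_flax(flax_params)
    decoder.load_state_dict(params, strict=False)  # tolerates key mismatches
    if torch.cuda.is_available(): decoder = decoder.cuda()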