import os
import json
import numpy
from .text_tokenizer import TextTokenizer
from .load_params import load_vqgan_torch_params, load_dalle_bart_flax_params
from .models.vqgan_detokenizer import VQGanDetokenizer


class MinDalleBase:
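    """Loads the dalle_bart config and the text tokenizer data from the
    pretrained/ directory; the VQGAN detokenizer is loaded separately via
    init_detokenizer()."""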
    def __init__(self, is_mega: bool):
        self.is_mega = is_mega
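        # the 'mega' and 'mini' checkpoints are expected in separate
        # subdirectories of pretrained/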
        model_name = 'dalle_bart_{}'.format('mega' if is_mega else 'mini')
        self.model_path = os.path.join('pretrained', model_name)
        print("reading files from {}".format(self.model_path))
        config_path = os.path.join(self.model_path, 'config.json')
        vocab_path = os.path.join(self.model_path, 'vocab.json')
        merges_path = os.path.join(self.model_path, 'merges.txt')
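        # read the model config and the BPE vocabulary / merge rules
        # used by the text tokenizer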
        with open(config_path, 'r', encoding='utf8') as f:
            self.config = json.load(f)
        with open(vocab_path, 'r', encoding='utf8') as f:
            vocab = json.load(f)
        with open(merges_path, 'r', encoding='utf8') as f:
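            # skip the first (header) line and the empty trailing
            # element left by the final newline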
            merges = f.read().split("\n")[1:-1]
        self.tokenizer = TextTokenizer(vocab, merges)

    def init_detokenizer(self):
        print("initializing VQGanDetokenizer")
        params = load_vqgan_torch_params('./pretrained/vqgan')
        self.detokenizer = VQGanDetokenizer()
        self.detokenizer.load_state_dict(params)
        del params

    def tokenize_text(self, text: str) -> numpy.ndarray:
        print("tokenizing text")
        tokens = self.tokenizer.tokenize(text)
        print("text tokens", tokens)
        text_token_count = self.config['max_text_length']
        text_tokens = numpy.ones((2, text_token_count), dtype=numpy.int32)
        text_tokens[0, :len(tokens)] = tokens
        text_tokens[1, :2] = [tokens[0], tokens[-1]]
        return text_tokens
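

# Minimal usage sketch (assumption: the pretrained weights have already been
# downloaded into pretrained/, which the paths above expect):
#
#   base = MinDalleBase(is_mega=False)
#   text_tokens = base.tokenize_text('a cat in a hat')
#   base.init_detokenizer()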