add temperature parameter

commit d64e957731
parent 2feabd7847

cog.yaml (vendored) | 2 +-
@@ -6,7 +6,7 @@ build:
     - "libgl1-mesa-glx"
     - "libglib2.0-0"
   python_packages:
-    - "min-dalle==0.3.11"
+    - "min-dalle==0.3.12"
   run:
     - pip install torch==1.12.0+cu116 -f https://download.pytorch.org/whl/torch_stable.html
 

@@ -13,6 +13,7 @@ parser.add_argument('--seed', type=int, default=-1)
 parser.add_argument('--grid-size', type=int, default=1)
 parser.add_argument('--image-path', type=str, default='generated')
 parser.add_argument('--models-root', type=str, default='pretrained')
+parser.add_argument('--top_k', type=int, default=256)
 
 
 def ascii_from_image(image: Image.Image, size: int = 128) -> str:
@@ -38,6 +39,7 @@ def generate_image(
     text: str,
     seed: int,
     grid_size: int,
+    top_k: int,
     image_path: str,
     models_root: str
 ):
@@ -48,7 +50,13 @@ def generate_image(
         is_verbose=True
     )
 
-    image = model.generate_image(text, seed, grid_size, is_verbose=True)
+    image = model.generate_image(
+        text,
+        seed,
+        grid_size,
+        top_k=top_k,
+        is_verbose=True
+    )
     save_image(image, image_path)
     print(ascii_from_image(image, size=128))
 
@@ -61,6 +69,7 @@ if __name__ == '__main__':
         text=args.text,
         seed=args.seed,
         grid_size=args.grid_size,
+        top_k=args.top_k,
         image_path=args.image_path,
         models_root=args.models_root
     )
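
A small, self-contained sketch of how the new flag is wired. The script's filename is not shown in this view; the '--top_k' name and its default of 256 are taken from the hunks above, and the parsed value is forwarded as top_k=args.top_k into generate_image(...):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--top_k', type=int, default=256)

    args = parser.parse_args(['--top_k', '128'])  # stands in for an actual command line
    print(args.top_k)  # 128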

@@ -177,8 +177,9 @@ class MinDalle:
         seed: int,
         image_count: int,
         log2_mid_count: int,
-        log2_k: int = 6,
-        log2_supercondition_factor: int = 3,
+        temperature: float = 1,
+        top_k: int = 256,
+        supercondition_factor: int = 16,
         is_verbose: bool = False
     ) -> Iterator[FloatTensor]:
         assert(log2_mid_count in range(5))
@@ -206,10 +207,10 @@ class MinDalle:
         with torch.cuda.amp.autocast(dtype=self.dtype):
             encoder_state, attention_mask, attention_state, image_tokens = (
                 self.decoder.decode_initial(
-                    seed,
-                    image_count,
-                    text_tokens,
-                    encoder_state
+                    seed=seed,
+                    image_count=image_count,
+                    text_tokens=text_tokens,
+                    encoder_state=encoder_state
                 )
             )
 
@@ -220,12 +221,13 @@ class MinDalle:
             with torch.cuda.amp.autocast(dtype=self.dtype):
                 attention_state, image_tokens = self.decoder.decode_row(
                     row_index,
-                    log2_k,
-                    log2_supercondition_factor,
-                    encoder_state,
-                    attention_mask,
-                    attention_state,
-                    image_tokens
+                    temperature=temperature,
+                    top_k=top_k,
+                    supercondition_factor=supercondition_factor,
+                    encoder_state=encoder_state,
+                    attention_mask=attention_mask,
+                    attention_state=attention_state,
+                    image_tokens_sequence=image_tokens
                 )
             with torch.cuda.amp.autocast(dtype=torch.float32):
                 if ((row_index + 1) * (2 ** log2_mid_count)) % row_count == 0:
@@ -240,18 +242,20 @@ class MinDalle:
         seed: int,
         grid_size: int,
         log2_mid_count: int,
-        log2_k: int = 6,
-        log2_supercondition_factor: int = 3,
+        temperature: float = 1,
+        top_k: int = 256,
+        supercondition_factor: int = 16,
         is_verbose: bool = False
     ) -> Iterator[Image.Image]:
         images_stream = self.generate_images_stream(
-            text,
-            seed,
-            grid_size ** 2,
-            log2_mid_count,
-            log2_k,
-            log2_supercondition_factor,
-            is_verbose
+            text=text,
+            seed=seed,
+            image_count=grid_size ** 2,
+            log2_mid_count=log2_mid_count,
+            temperature=temperature,
+            top_k=top_k,
+            supercondition_factor=supercondition_factor,
+            is_verbose=is_verbose
         )
         for images in images_stream:
             yield self.grid_from_images(images)
@@ -262,19 +266,21 @@ class MinDalle:
         text: str,
         seed: int = -1,
         image_count: int = 1,
-        log2_k: int = 6,
-        log2_supercondition_factor: int = 3,
+        temperature: float = 1,
+        top_k: int = 1024,
+        supercondition_factor: int = 16,
         is_verbose: bool = False
     ) -> FloatTensor:
         log2_mid_count = 0
         images_stream = self.generate_images_stream(
-            text,
-            seed,
-            image_count,
-            log2_mid_count,
-            log2_k,
-            log2_supercondition_factor,
-            is_verbose
+            text=text,
+            seed=seed,
+            image_count=image_count,
+            temperature=temperature,
+            log2_mid_count=log2_mid_count,
+            top_k=top_k,
+            supercondition_factor=supercondition_factor,
+            is_verbose=is_verbose
         )
         return next(images_stream)
 
@@ -284,18 +290,20 @@ class MinDalle:
         text: str,
         seed: int = -1,
         grid_size: int = 1,
-        log2_k: int = 6,
-        log2_supercondition_factor: int = 3,
+        temperature: float = 1,
+        top_k: int = 1024,
+        supercondition_factor: int = 16,
         is_verbose: bool = False
     ) -> Image.Image:
         log2_mid_count = 0
         image_stream = self.generate_image_stream(
-            text,
-            seed,
-            grid_size,
-            log2_mid_count,
-            log2_k,
-            log2_supercondition_factor,
-            is_verbose
+            text=text,
+            seed=seed,
+            grid_size=grid_size,
+            log2_mid_count=log2_mid_count,
+            temperature=temperature,
+            top_k=top_k,
+            supercondition_factor=supercondition_factor,
+            is_verbose=is_verbose
        )
        return next(image_stream)
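
For callers migrating from the removed log2-style arguments: the old code exponentiated them internally, so the direct equivalents follow by exponentiation. A self-contained illustration using only values visible in this diff (note that the new defaults are deliberately larger than the exponentiated old ones):

    # Old defaults (removed lines above): log2_k = 6, log2_supercondition_factor = 3.
    # The old decoder used 2 ** log2_k and 2 ** log2_supercondition_factor internally.
    old_top_k = 2 ** 6                   # 64
    old_supercondition_factor = 2 ** 3   # 8

    # New defaults (added lines above): top_k = 256 in the stream methods, 1024 in the
    # one-shot methods; supercondition_factor = 16; temperature = 1.
    print(old_top_k, old_supercondition_factor)  # 64 8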

@@ -140,8 +140,9 @@ class DalleBartDecoder(nn.Module):
 
     def decode_step(
         self,
-        log2_k: int,
-        log2_supercondition_factor: int,
+        temperature: float,
+        top_k: int,
+        supercondition_factor: int,
         attention_mask: BoolTensor,
         encoder_state: FloatTensor,
         attention_state: FloatTensor,
@@ -166,18 +167,17 @@ class DalleBartDecoder(nn.Module):
         )
         decoder_state = self.final_ln(decoder_state)
         logits = self.lm_head(decoder_state)
-        a = 2 ** log2_supercondition_factor
+        a = supercondition_factor
         logits: FloatTensor = (
             logits[:image_count, -1] * (1 - a) +
             logits[image_count:, -1] * a
         )
 
-        top_logits, _ = logits.topk(2 ** log2_k, dim=-1)
-        probs = torch.where(
-            logits < top_logits[:, [-1]],
-            self.zero_prob,
-            torch.exp(logits - top_logits[:, [0]])
-        )
+        top_logits, _ = logits.topk(top_k, dim=-1)
+        is_kept = logits >= top_logits[:, [-1]]
+        logits -= top_logits[:, [0]]
+        logits /= max(temperature, 1e-6)
+        probs = torch.where(is_kept, torch.exp(logits), self.zero_prob)
         probs[:, 2 ** 14:] = 0  # vqgan vocab_count is only 2 ** 14
         return probs, attention_state
 
@@ -185,8 +185,9 @@ class DalleBartDecoder(nn.Module):
     def decode_row(
         self,
         row_index: int,
-        log2_k: int,
-        log2_supercondition_factor: int,
+        temperature: float,
+        top_k: int,
+        supercondition_factor: int,
         encoder_state: FloatTensor,
         attention_mask: BoolTensor,
         attention_state: FloatTensor,
@@ -195,8 +196,9 @@ class DalleBartDecoder(nn.Module):
         for col_index in range(16):
             i = 16 * row_index + col_index
             probs, attention_state = self.decode_step(
-                log2_k = log2_k,
-                log2_supercondition_factor = log2_supercondition_factor,
+                temperature = temperature,
+                top_k = top_k,
+                supercondition_factor = supercondition_factor,
                 attention_mask = attention_mask,
                 encoder_state = encoder_state,
                 attention_state = attention_state,
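
The core of the change is the sampling step in decode_step: logits outside the top_k are zeroed out, and the surviving logits are divided by temperature before exponentiation, so temperature below 1 sharpens the distribution and above 1 flattens it. A standalone sketch of that math, simplified to a single 1-D logits vector (the real code is batched and uses self.zero_prob rather than a fresh zeros tensor):

    import torch

    def top_k_temperature_probs(logits: torch.Tensor, top_k: int, temperature: float) -> torch.Tensor:
        top_logits, _ = logits.topk(top_k, dim=-1)
        is_kept = logits >= top_logits[..., [-1]]    # mask of the top_k logits
        logits = logits - top_logits[..., [0]]       # subtract the max for numerical stability
        logits = logits / max(temperature, 1e-6)     # the guard avoids division by zero
        return torch.where(is_kept, torch.exp(logits), torch.zeros_like(logits))

    probs = top_k_temperature_probs(torch.randn(2 ** 14), top_k=1024, temperature=1.0)

These weights are only relative (any later normalization happens outside this diff), so the shift by the maximum does not change which tokens are likely to be sampled.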

setup.py | 2 +-

@@ -5,7 +5,7 @@ setuptools.setup(
     name='min-dalle',
     description = 'min(DALL·E)',
     # long_description=(Path(__file__).parent / "README.rst").read_text(),
-    version='0.3.11',
+    version='0.3.12',
     author='Brett Kuprel',
     author_email='brkuprel@gmail.com',
     url='https://github.com/kuprel/min-dalle',