Instructions to use BiliSakura/BitDance-Tokenizer-diffusers with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Diffusers
How to use BiliSakura/BitDance-Tokenizer-diffusers with Diffusers:
pip install -U diffusers transformers accelerate
```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained("BiliSakura/BitDance-Tokenizer-diffusers", dtype=torch.bfloat16, device_map="cuda")

prompt = "Turn this cat into a dog"
input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
image = pipe(image=input_image, prompt=prompt).images[0]
```
- Notebooks
- Google Colab
- Kaggle
from __future__ import annotations

import torch
from torch import nn

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from transformers.activations import ACT2FN
class BitDanceProjector(ModelMixin, ConfigMixin):
    """Two-layer MLP projector mapping features from ``in_dim`` to ``out_dim``.

    Architecture: ``Linear(in_dim, out_dim) -> activation -> Linear(out_dim, out_dim)``.

    Args:
        in_dim: Size of the incoming feature dimension.
        out_dim: Size of the projected feature dimension.
        hidden_act: Name of the activation between the two linear layers,
            resolved through ``transformers.activations.ACT2FN``
            (default ``"gelu_pytorch_tanh"``).
    """

    # @register_to_config records the __init__ arguments on self.config so that
    # ConfigMixin can serialize them with save_pretrained and rebuild the model
    # with from_pretrained. Without it the module was imported but unused and
    # the config round-trip would fail.
    @register_to_config
    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        hidden_act: str = "gelu_pytorch_tanh",
    ) -> None:
        super().__init__()
        # ACT2FN maps the activation's string name to its callable implementation.
        self.activation_fn = ACT2FN[hidden_act]
        self.fc1 = nn.Linear(in_dim, out_dim)
        self.fc2 = nn.Linear(out_dim, out_dim)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Project ``hidden_states`` of shape ``(..., in_dim)`` to ``(..., out_dim)``.

        Args:
            hidden_states: Input features; the trailing dimension must equal
                ``in_dim``.

        Returns:
            Projected features with trailing dimension ``out_dim``.
        """
        # Cast inputs to the layer's parameter dtype so mixed-precision
        # callers (e.g. bfloat16 pipelines) do not trigger dtype mismatches.
        hidden_states = hidden_states.to(self.fc1.weight.dtype)
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states