import os
import shutil
from typing import List, Literal, Tuple

import gradio as gr
import spaces
from gradio_litmodel3d import LitModel3D

os.environ['SPCONV_ALGO'] = 'native'  # select the sparse-conv backend before trellis imports

import torch
import numpy as np
import imageio
from PIL import Image
from trellis.pipelines import TrellisImageTo3DPipeline
from trellis.representations import MeshExtractResult
from trellis.utils import render_utils, postprocessing_utils

MAX_SEED = np.iinfo(np.int32).max
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
os.makedirs(TMP_DIR, exist_ok=True)


def start_session(req: gr.Request):
    """Create a per-session temporary directory when a user connects."""
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    os.makedirs(user_dir, exist_ok=True)


def end_session(req: gr.Request):
    """Remove the per-session temporary directory when the user disconnects."""
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    shutil.rmtree(user_dir)


def preprocess_image(image: Image.Image) -> Image.Image:
    """
    Preprocess the input image for 3D generation.

    Called when a user uploads an image or selects an example. Applies
    background removal and the other preprocessing steps needed for
    optimal 3D model generation.

    Args:
        image (Image.Image): The input image from the user.

    Returns:
        Image.Image: The preprocessed image, ready for 3D generation.
    """
    processed_image = pipeline.preprocess_image(image)
    return processed_image


def preprocess_images(images: List[Tuple[Image.Image, str]]) -> List[Image.Image]:
    """
    Preprocess a list of input images for multi-image 3D generation.

    Called when users upload multiple images in the gallery. Each image is
    processed to prepare it for the multi-image 3D generation pipeline.

    Args:
        images (List[Tuple[Image.Image, str]]): The input images from the gallery.

    Returns:
        List[Image.Image]: The preprocessed images, ready for 3D generation.
    """
    images = [image[0] for image in images]
    processed_images = [pipeline.preprocess_image(image) for image in images]
    return processed_images


def get_seed(randomize_seed: bool, seed: int) -> int:
    """
    Get the random seed for generation.

    Called by the generate button to decide whether to draw a fresh random
    seed or use the user-specified seed value.

    Args:
        randomize_seed (bool): Whether to generate a random seed.
        seed (int): The user-specified seed value.

    Returns:
        int: The seed to use for generation.
    """
    return np.random.randint(0, MAX_SEED) if randomize_seed else seed


@spaces.GPU(duration=120)
def generate_and_extract_glb(
    image: Image.Image,
    multiimages: List[Tuple[Image.Image, str]],
    is_multiimage: bool,
    seed: int,
    ss_guidance_strength: float,
    ss_sampling_steps: int,
    slat_guidance_strength: float,
    slat_sampling_steps: int,
    multiimage_algo: Literal["multidiffusion", "stochastic"],
    mesh_simplify: float,
    texture_size: int,
    req: gr.Request,
) -> Tuple[str, str, str]:
    """
    Convert an image to a 3D model and extract a GLB file.

    Args:
        image (Image.Image): The input image.
        multiimages (List[Tuple[Image.Image, str]]): The input images in multi-image mode.
        is_multiimage (bool): Whether multi-image mode is active.
        seed (int): The random seed.
        ss_guidance_strength (float): Guidance strength for sparse structure generation.
        ss_sampling_steps (int): Number of sampling steps for sparse structure generation.
        slat_guidance_strength (float): Guidance strength for structured latent generation.
        slat_sampling_steps (int): Number of sampling steps for structured latent generation.
        multiimage_algo (Literal["multidiffusion", "stochastic"]): Algorithm for multi-image generation.
        mesh_simplify (float): The mesh simplification factor.
        texture_size (int): The texture resolution.
    Returns:
        str: The path to the preview video of the 3D model.
        str: The path to the extracted GLB file.
        str: The path to the extracted GLB file (for the download button).
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))

    # Generate the 3D model (stage 1: sparse structure; stage 2: structured latent)
    if not is_multiimage:
        outputs = pipeline.run(
            image,
            seed=seed,
            formats=["gaussian", "mesh"],
            preprocess_image=False,
            sparse_structure_sampler_params={
                "steps": ss_sampling_steps,
                "cfg_strength": ss_guidance_strength,
            },
            slat_sampler_params={
                "steps": slat_sampling_steps,
                "cfg_strength": slat_guidance_strength,
            },
        )
    else:
        outputs = pipeline.run_multi_image(
            [image[0] for image in multiimages],
            seed=seed,
            formats=["gaussian", "mesh"],
            preprocess_image=False,
            sparse_structure_sampler_params={
                "steps": ss_sampling_steps,
                "cfg_strength": ss_guidance_strength,
            },
            slat_sampler_params={
                "steps": slat_sampling_steps,
                "cfg_strength": slat_guidance_strength,
            },
            mode=multiimage_algo,
        )

    # Render a preview video: gaussian color render side by side with mesh normals
    video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
    video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
    video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
    video_path = os.path.join(user_dir, 'sample.mp4')
    imageio.mimsave(video_path, video, fps=15)

    # Extract the GLB: simplify the mesh, then bake the texture from the gaussians
    gs = outputs['gaussian'][0]
    mesh = outputs['mesh'][0]
    glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
    glb_path = os.path.join(user_dir, 'sample.glb')
    glb.export(glb_path)

    torch.cuda.empty_cache()
    return video_path, glb_path, glb_path


theme = gr.themes.Base().set(
    body_background_fill="#1A1A1A",
    body_background_fill_dark="#1A1A1A",
    body_text_color="#CCCCCC",
    body_text_color_dark="#CCCCCC",
    block_background_fill="#2C2C2C",
    block_background_fill_dark="#2C2C2C",
    block_border_color="#3C3C3C",
    block_border_color_dark="#3C3C3C",
    button_primary_background_fill="#FF8C00",
    button_primary_background_fill_dark="#FF8C00",
    button_primary_background_fill_hover="#FF9F33",
    button_primary_border_color="*primary_500",
    button_primary_text_color="white",
    button_primary_text_color_dark="white",
    block_border_width="1px",
    block_radius="8px",
)

with gr.Blocks(
    theme=theme,
    css="""
    .gradio-container {
        background: #1A1A1A !important;
        color: #CCCCCC !important;
        font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
    }
    .gradio-container .footer,
    .gradio-container footer,
    .gradio-container [data-testid="footer"],
    .gradio-container .gradio-footer {
        display: none !important;
    }
    .gradio-container .gradio-container {
        padding-bottom: 0 !important;
    }
    .gradio-container h1,
    .gradio-container h2,
    .gradio-container h3 {
        color: #FFFFFF !important;
        font-weight: bold !important;
    }
    .gradio-container .markdown {
        color: #CCCCCC !important;
    }
    .gradio-container .tab-nav {
        background: #2C2C2C !important;
        border: none !important;
    }
    .gradio-container .tab-nav button {
        background: #2C2C2C !important;
        color: #CCCCCC !important;
        border: none !important;
        border-radius: 8px 8px 0 0 !important;
    }
    .gradio-container .tab-nav button.selected {
        background: #FF8C00 !important;
        color: #FFFFFF !important;
    }
    .gradio-container .tab-nav button:hover {
        background: #3C3C3C !important;
    }
    .gradio-container .tab-nav button.selected:hover {
        background: #FF8C00 !important;
    }
    .gradio-container .tab-content {
        background: #2C2C2C !important;
        border: none !important;
        border-radius: 0 0 8px 8px !important;
        padding: 20px !important;
    }
    .gradio-container .accordion {
        background: #2C2C2C !important;
        border: 1px solid #3C3C3C !important;
        border-radius: 8px !important;
        margin: 10px 0 !important;
    }
    .gradio-container .accordion .accordion-header {
        background: #2C2C2C !important;
        color: #FFFFFF !important;
        border: none !important;
        border-radius: 8px !important;
    }
    .gradio-container .accordion .accordion-content {
        background: #2C2C2C !important;
        color: #CCCCCC !important;
        border: none !important;
        border-radius: 0 0 8px 8px !important;
    }
    .gradio-container .button {
        background: #FF8C00 !important;
        color: #FFFFFF !important;
        border: none !important;
        border-radius: 8px !important;
        font-weight: bold !important;
        padding: 12px 24px !important;
    }
    .gradio-container .button:hover {
        background: #FF9F33 !important;
    }
    .gradio-container .button.secondary {
        background: #3C3C3C !important;
        color: #CCCCCC !important;
    }
    .gradio-container .button.secondary:hover {
        background: #4C4C4C !important;
    }
    .gradio-container .slider {
        background: #3C3C3C !important;
    }
    .gradio-container .slider .slider-handle {
        background: #FF8C00 !important;
        border: 2px solid #FFFFFF !important;
    }
    .gradio-container .slider .slider-track {
        background: #3C3C3C !important;
    }
    .gradio-container .slider .slider-track-fill {
        background: #FF8C00 !important;
    }
    .gradio-container .checkbox {
        color: #CCCCCC !important;
    }
    .gradio-container .radio {
        color: #CCCCCC !important;
    }
    .gradio-container .gallery,
    .gradio-container .image,
    .gradio-container .video,
    .gradio-container .model3d {
        background: #2C2C2C !important;
        border: 1px solid #3C3C3C !important;
        border-radius: 8px !important;
    }
    .gradio-container .row {
        gap: 20px !important;
        align-items: flex-start !important;
        justify-content: center !important;
    }
    .gradio-container .column {
        background: #2C2C2C !important;
        border: 1px solid #3C3C3C !important;
        border-radius: 8px !important;
        padding: 20px !important;
    }
    .gradio-container .container {
        max-width: 1200px !important;
        margin: 0 auto !important;
        padding: 20px !important;
    }
    """,
) as demo:
    gr.Markdown("""

### Instructions

- Upload an image and click **Generate 3D Asset** to create a 3D model.
- Images with an alpha channel use it as the foreground mask; otherwise the background is removed automatically.
- Multi-image mode accepts multiple views of the same object for more accurate 3D reconstruction.

""") with gr.Row(): with gr.Column(scale=1): gr.Markdown("""

### 3D Model Generation

Generate textured 3D models from one or more input images.

""") with gr.Tabs() as input_tabs: with gr.Tab(label="Single Image", id=0) as single_image_input_tab: image_prompt = gr.Image(label="Image Prompt", format="png", image_mode="RGBA", type="pil", height=300) with gr.Tab(label="Multiple Images", id=1) as multiimage_input_tab: multiimage_prompt = gr.Gallery(label="Image Prompt", format="png", type="pil", height=300, columns=3) gr.Markdown(""" **Multi-Image Mode:** Upload different views of the same object for enhanced 3D reconstruction. *Note: For best results, ensure consistent lighting and object positioning across views.* """) with gr.Accordion(label="Generation Settings", open=False): seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1) randomize_seed = gr.Checkbox(label="Randomize Seed", value=True) gr.Markdown("**Stage 1: Sparse Structure Generation**") with gr.Row(): ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1) ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1) gr.Markdown("**Stage 2: Structured Latent Generation**") with gr.Row(): slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1) slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1) multiimage_algo = gr.Radio(["stochastic", "multidiffusion"], label="Multi-image Algorithm", value="stochastic") with gr.Accordion(label="Output Settings", open=False): mesh_simplify = gr.Slider(0.3, 0.98, label="Mesh Simplification", value=0.95, step=0.01) texture_size = gr.Slider(512, 2048, label="Texture Resolution", value=1024, step=512) generate_btn = gr.Button("Generate 3D Asset", variant="primary", size="lg") with gr.Column(scale=1): gr.Markdown("""

### Generated 3D Asset

Preview and download your generated 3D model.

""") video_output = gr.Video(label="Generated 3D Asset", autoplay=True, loop=True, height=300) model_output = LitModel3D(label="3D Model Preview", exposure=10.0, height=300) download_glb = gr.DownloadButton(label="Download 3D Asset", interactive=False, variant="secondary") is_multiimage = gr.State(False) # Handlers demo.load(start_session) demo.unload(end_session) single_image_input_tab.select( lambda: False, outputs=[is_multiimage] ) multiimage_input_tab.select( lambda: True, outputs=[is_multiimage] ) image_prompt.upload( preprocess_image, inputs=[image_prompt], outputs=[image_prompt], ) multiimage_prompt.upload( preprocess_images, inputs=[multiimage_prompt], outputs=[multiimage_prompt], ) generate_btn.click( get_seed, inputs=[randomize_seed, seed], outputs=[seed], ).then( generate_and_extract_glb, inputs=[image_prompt, multiimage_prompt, is_multiimage, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps, multiimage_algo, mesh_simplify, texture_size], outputs=[video_output, model_output, download_glb], ).then( lambda: gr.Button(interactive=True), outputs=[download_glb], ) video_output.clear( lambda: gr.Button(interactive=False), outputs=[download_glb], ) model_output.clear( lambda: gr.Button(interactive=False), outputs=[download_glb], ) gr.Markdown("""

Powered by Mean Cat Entertainment • Built for the future of VFX

""") # Launch the Gradio app if __name__ == "__main__": pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large") pipeline.cuda() try: pipeline.preprocess_image(Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))) # Preload rembg except: pass demo.launch(mcp_server=True)