Naeem4600 committed
Commit 94b705a · verified · 1 Parent(s): 019a8c2

Upload 6 files
Files changed (6):
  1. .gitattributes +40 -35
  2. .gitignore +1 -0
  3. README.md +15 -13
  4. app.py +68 -13
  5. requirements-detail.txt +27 -0
  6. requirements.txt +29 -0
.gitattributes CHANGED
@@ -1,35 +1,40 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.o filter=lfs diff=lfs merge=lfs -text
+ *.ninja_deps filter=lfs diff=lfs merge=lfs -text
+ *.so filter=lfs diff=lfs merge=lfs -text
+ *.whl filter=lfs diff=lfs merge=lfs -text
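
The new .gitattributes entries route *.png, *.o, *.ninja_deps, *.so, and *.whl files through Git LFS. A quick way to confirm that a given path matches one of these rules is a hypothetical check like the one below; it is not part of the commit, assumes a local git checkout, and reuses the same subprocess/shlex style as app.py further down.

```python
# Hypothetical verification (not part of this commit): confirm that a bundled
# wheel path is picked up by the new *.whl LFS rule in .gitattributes.
import shlex
import subprocess

result = subprocess.run(
    shlex.split(
        "git check-attr filter -- package/nvdiffrast-0.3.1.torch-cp310-cp310-linux_x86_64.whl"
    ),
    capture_output=True,
    text=True,
)
# Prints something like "package/...whl: filter: lfs" when the pattern applies.
print(result.stdout.strip())
```

git check-attr matches paths against the attribute patterns, so this works even before the file itself is committed.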
.gitignore ADDED
@@ -0,0 +1 @@
+ *.pyc
README.md CHANGED
@@ -1,13 +1,15 @@
- ---
- title: Unique3D786
- emoji: 🏢
- colorFrom: purple
- colorTo: pink
- sdk: gradio
- sdk_version: 5.25.2
- app_file: app.py
- pinned: false
- short_description: 3D
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Unique3D
+ emoji:
+ colorFrom: red
+ colorTo: purple
+ sdk: gradio
+ python_version: 3.10.8
+ sdk_version: 4.12.0
+ app_file: app.py
+ pinned: true
+ short_description: Create a 1M faces 3D colored model from an image!
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,13 +1,68 @@
- import gradio as gr
- import torch
- import spaces # Import the spaces library
-
- @spaces.GPU # Decorate the function that needs GPU
- def greet(n):
-     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-     zero = torch.tensor([0]).to(device)
-     print(zero.device)
-     return f"Hello {zero + n} Tensor"
-
- demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
- demo.launch()
+ import shlex
+ import subprocess
+ subprocess.run(
+     shlex.split(
+         "pip install package/onnxruntime_gpu-1.17.0-cp310-cp310-manylinux_2_28_x86_64.whl --force-reinstall --no-deps"
+     )
+ )
+ subprocess.run(
+     shlex.split(
+         "pip install package/nvdiffrast-0.3.1.torch-cp310-cp310-linux_x86_64.whl --force-reinstall --no-deps"
+     )
+ )
+
+ if __name__ == "__main__":
+     import os
+     from huggingface_hub import login
+     hf_token = os.environ.get("HF_TOKEN")
+     login(token=hf_token)
+
+ import os
+ import sys
+ sys.path.append(os.curdir)
+ import torch
+ torch.set_float32_matmul_precision('medium')
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.set_grad_enabled(False)
+
+ import fire
+ import gradio as gr
+ from gradio_app.gradio_3dgen import create_ui as create_3d_ui
+ from gradio_app.all_models import model_zoo
+
+
+ _TITLE = '''Unique3D: High-Quality and Efficient 3D Mesh Generation from a Single Image'''
+ _DESCRIPTION = '''
+
+ <div>
+ <a style="display:inline-block" href='https://github.com/AiuniAI/Unique3D'><img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/AiuniAI/Unique3D?style=social">
+ </a>
+ <img alt="GitHub License" src="https://img.shields.io/github/license/AiuniAI/Unique3D">
+ </div>
+
+ # [Paper](https://arxiv.org/abs/2405.20343) | [Project page](https://wukailu.github.io/Unique3D/) | [Huggingface Demo](https://huggingface.co/spaces/Wuvin/Unique3D) | [Gradio Demo](http://unique3d.demo.avar.cn/) | [Online Demo](https://www.aiuni.ai/)
+
+ * High-fidelity and diverse textured meshes generated by Unique3D from single-view images.
+
+ * The demo is still under construction, and more features are expected to be implemented soon.
+
+ * If the Huggingface Demo is overcrowded or fails to produce stable results, you can use the Online Demo [aiuni.ai](https://www.aiuni.ai/), which is free to try (get the registration invitation code Join Discord: https://discord.gg/aiuni). However, the Online Demo is slightly different from the Gradio Demo, in that the inference speed is slower, but the generation is much more stable.
+ '''
+
+ def launch():
+     model_zoo.init_models()
+
+     with gr.Blocks(
+         title=_TITLE,
+         # theme=gr.themes.Monochrome(),
+     ) as demo:
+         with gr.Row():
+             with gr.Column(scale=1):
+                 gr.Markdown('# ' + _TITLE)
+                 gr.Markdown(_DESCRIPTION)
+         create_3d_ui("wkl")
+
+     demo.queue().launch(share=True)
+
+ if __name__ == '__main__':
+     fire.Fire(launch)
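
The new app.py force-reinstalls the two bundled wheels on every startup, before any heavyweight imports. Below is a minimal sketch of a guarded variant of the same idea, purely illustrative and not what the commit does; ensure_local_wheel is a hypothetical helper name, and the wheel paths mirror the ones hard-coded in app.py above.

```python
# Illustrative sketch only (not part of the commit): install a bundled wheel
# at startup, but skip the reinstall when the package is already importable.
import importlib.util
import shlex
import subprocess

def ensure_local_wheel(module_name: str, wheel_path: str) -> None:
    """Install a bundled wheel only if `module_name` cannot be imported yet."""
    if importlib.util.find_spec(module_name) is None:
        subprocess.run(
            shlex.split(f"pip install {wheel_path} --force-reinstall --no-deps"),
            check=True,
        )

# The onnxruntime_gpu wheel installs the importable module "onnxruntime".
ensure_local_wheel("onnxruntime", "package/onnxruntime_gpu-1.17.0-cp310-cp310-manylinux_2_28_x86_64.whl")
ensure_local_wheel("nvdiffrast", "package/nvdiffrast-0.3.1.torch-cp310-cp310-linux_x86_64.whl")
```

Skipping the reinstall when the module already resolves can shorten Space restarts, at the cost of not picking up a replaced wheel automatically.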
requirements-detail.txt ADDED
@@ -0,0 +1,27 @@
+ accelerate==0.29.2
+ datasets==2.18.0
+ diffusers==0.27.2
+ fire==0.6.0
+ gradio==4.32.0
+ jaxtyping==0.2.29
+ numba==0.59.1
+ numpy==1.26.4
+ nvdiffrast==0.3.1
+ omegaconf==2.3.0
+ onnxruntime_gpu==1.17.0
+ opencv_python==4.9.0.80
+ opencv_python_headless==4.9.0.80
+ ort_nightly_gpu==1.17.0.dev20240118002
+ peft==0.10.0
+ Pillow==10.3.0
+ pygltflib==1.16.2
+ pymeshlab==2023.12.post1
+ pytorch3d==0.7.5
+ rembg==2.0.56
+ torch==2.1.0+cu121
+ torch_scatter==2.1.2
+ tqdm==4.64.1
+ transformers==4.39.3
+ trimesh==4.3.0
+ typeguard==2.13.3
+ wandb==0.16.6
requirements.txt ADDED
@@ -0,0 +1,29 @@
+ pytorch3d @ https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt221/pytorch3d-0.7.6-cp310-cp310-linux_x86_64.whl
+ ort_nightly_gpu @ https://aiinfra.pkgs.visualstudio.com/2692857e-05ef-43b4-ba9c-ccf1c22c437c/_packaging/d3daa2b0-aa56-45ac-8145-2c3dc0661c87/pypi/download/ort-nightly-gpu/1.17.dev20240118002/ort_nightly_gpu-1.17.0.dev20240118002-cp310-cp310-manylinux_2_28_x86_64.whl
+ onnxruntime_gpu @ https://pkgs.dev.azure.com/onnxruntime/2a773b67-e88b-4c7f-9fc0-87d31fea8ef2/_packaging/7fa31e42-5da1-4e84-a664-f2b4129c7d45/pypi/download/onnxruntime-gpu/1.17/onnxruntime_gpu-1.17.0-cp310-cp310-manylinux_2_28_x86_64.whl
+ torch==2.2.0
+ accelerate
+ datasets
+ diffusers>=0.26.3
+ fire
+ gradio
+ jaxtyping
+ numba
+ numpy
+ omegaconf>=2.3.0
+ opencv_python
+ opencv_python_headless
+ peft
+ Pillow
+ pygltflib
+ pymeshlab>=2023.12
+ rembg[gpu]
+ torch>=2.0.1
+ torch_scatter @ https://data.pyg.org/whl/torch-2.2.0%2Bcu121/torch_scatter-2.1.2%2Bpt22cu121-cp310-cp310-linux_x86_64.whl
+ tqdm
+ transformers
+ trimesh
+ typeguard
+ wandb
+ xformers
+ ninja
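
requirements.txt pins several GPU-specific packages as direct-URL wheels (pytorch3d, ort_nightly_gpu, onnxruntime_gpu, torch_scatter). A small post-install sanity check is sketched below; it is not part of the commit, and the expected versions are simply read off the wheel filenames above.

```python
# Hypothetical post-install check (not in the commit): confirm that the
# direct-URL wheels pinned in requirements.txt resolved to the expected versions.
from importlib.metadata import PackageNotFoundError, version

expected = {
    "pytorch3d": "0.7.6",
    "onnxruntime-gpu": "1.17.0",
    "torch-scatter": "2.1.2",  # installed wheel reports 2.1.2+pt22cu121
}

for name, want in expected.items():
    try:
        got = version(name)
        status = "OK" if got.startswith(want) else f"unexpected version {got}"
    except PackageNotFoundError:
        status = "not installed"
    print(f"{name}: {status}")
```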