#nct西珍妮[超话]# 出 走pt 懂包装 有小礼物
香奈儿娜 日比耶单封娜 wb娜闪 冬专灿
黄衣灿 比耶灿罗渽民 大队sw娜 直播卡娜 爱宝门票娜 src娜 七周年红酒杯娜 byl id卡娜 餐盘娜 杯垫娜 挡板娜 高脚杯 展会拍立得娜 徽章娜 戳脸娜 fm入场娜 玩偶娜 srp娜 k4p娜 kms签售v3v1娜 应援棒贴纸娜 Candy随机娜sw娜 kmsp娜 台历娜 nct dream 划钓 jaemin istj 李马克make 黄仁俊renjun 李帝努 李楷灿 罗渽民 钟辰乐 朴志晟nct 127 dream 白菜价 李楷灿 李马克 钟辰乐 李泰容 金道英 金廷祐 朴志晟 DVD灿 gm glitch mode mumo灿 sticker owhat灿 istj wm6.0灿 favorite马闪 西装马闪 istj满额灿 sticker卡车马 台历阿拉丁马灿 崎玉游戏卡马 电影卡灿 红发灿大队mumo乐 大队日专乐 candy贴纸诺set bfe娜 爱宝星 湖人钥匙扣马闪 首巡容闪 返校貂 英雄t貂 大队mumo kun
香奈儿娜 日比耶单封娜 wb娜闪 冬专灿
黄衣灿 比耶灿罗渽民 大队sw娜 直播卡娜 爱宝门票娜 src娜 七周年红酒杯娜 byl id卡娜 餐盘娜 杯垫娜 挡板娜 高脚杯 展会拍立得娜 徽章娜 戳脸娜 fm入场娜 玩偶娜 srp娜 k4p娜 kms签售v3v1娜 应援棒贴纸娜 Candy随机娜sw娜 kmsp娜 台历娜 nct dream 划钓 jaemin istj 李马克make 黄仁俊renjun 李帝努 李楷灿 罗渽民 钟辰乐 朴志晟nct 127 dream 白菜价 李楷灿 李马克 钟辰乐 李泰容 金道英 金廷祐 朴志晟 DVD灿 gm glitch mode mumo灿 sticker owhat灿 istj wm6.0灿 favorite马闪 西装马闪 istj满额灿 sticker卡车马 台历阿拉丁马灿 崎玉游戏卡马 电影卡灿 红发灿大队mumo乐 大队日专乐 candy贴纸诺set bfe娜 爱宝星 湖人钥匙扣马闪 首巡容闪 返校貂 英雄t貂 大队mumo kun
#nct西珍妮[超话]# 出出出 走平台 可以预留 可
90半包 130包邮
nct 127 dream 白菜价 李楷灿 李马克 钟辰乐 李泰容 金道英 金廷祐 朴志晟 DVD灿 gm glitch mode mumo灿 sticker owhat灿 istj wm6.0灿 favorite马闪 西装马闪 istj满额灿 sticker卡车马 台历阿拉丁马灿 崎玉游戏卡马 电影卡灿 红发灿大队mumo乐 大队日专乐 candy贴纸诺set bfe娜 爱宝星 湖人钥匙扣马闪 首巡容闪 返校貂 英雄t貂 大队mumo kun
90半包 130包邮
nct 127 dream 白菜价 李楷灿 李马克 钟辰乐 李泰容 金道英 金廷祐 朴志晟 DVD灿 gm glitch mode mumo灿 sticker owhat灿 istj wm6.0灿 favorite马闪 西装马闪 istj满额灿 sticker卡车马 台历阿拉丁马灿 崎玉游戏卡马 电影卡灿 红发灿大队mumo乐 大队日专乐 candy贴纸诺set bfe娜 爱宝星 湖人钥匙扣马闪 首巡容闪 返校貂 英雄t貂 大队mumo kun
Referring to the following Colab Python code,
write a new Colab Python notebook for this request:
the user uploads a three-view (multiview) orthographic-projection image file (PNG, JPG, etc.), and the notebook loads the image to produce a 3D point cloud,
then writes the result to an .obj file.
# --- Environment setup (Colab shell/IPython magics) ---
# NOTE(review): both clone URLs are t.cn short links — presumably the shap-e
# and point-e GitHub repositories; verify what they resolve to before running.
! pip install plotly -q
!git clone https://t.cn/A6KTcqVE
%cd shap-e
!pip install -e .
!git clone https://t.cn/A6NRWmuS
#Enter the directory and install the requirements
# NOTE(review): this second `%cd shap-e` already runs from inside shap-e, so it
# descends into a nested directory (or fails) — confirm the intended path; the
# second clone likely needs its own `%cd <repo>` instead.
%cd shap-e
!pip install -e .
from PIL import Image
import torch
from tqdm.auto import tqdm
from point_e.diffusion.configs import DIFFUSION_CONFIGS, diffusion_from_config
from point_e.diffusion.sampler import PointCloudSampler
from point_e.models.download import load_checkpoint
from point_e.models.configs import MODEL_CONFIGS, model_from_config
from point_e.util.plotting import plot_point_cloud
#Implementation and Cooking the 3D models, import all the necessary libraries.
#%cd /content/shap-e
import torch
from shap_e.diffusion.sample import sample_latents
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
from shap_e.models.download import load_model, load_config
from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget
# Set the device to CUDA if available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the Shap-E models and weights:
#   xm         - latent -> renderable-3D decoder ("transmitter")
#   model      - text-conditional latent generator
#   diffusion  - diffusion process configuration
xm = load_model('transmitter', device=device)
model = load_model('text300M', device=device)
diffusion = diffusion_from_config(load_config('diffusion'))

# BUG FIX: the original referenced an undefined name `prompt` below, which
# raises NameError at sample time. Define the text prompt explicitly.
prompt = 'a 3d model'

# Generation hyperparameters.
batch_size = 1          # number of latents to generate; higher values take longer.
guidance_scale = 65.0   # classifier-free guidance strength; higher follows the prompt more closely.

latents = sample_latents(
    batch_size=batch_size,
    model=model,
    diffusion=diffusion,
    guidance_scale=guidance_scale,
    model_kwargs=dict(texts=[prompt] * batch_size),
    progress=True,
    clip_denoised=True,
    use_fp16=True,
    use_karras=True,
    karras_steps=64,
    sigma_min=1E-3,
    sigma_max=160,
    s_churn=0,
)
render_mode = 'stf'  # rendering mode passed to the latent-image decoder
size = 128           # pixel size of each render; larger values render slower
cameras = create_pan_cameras(size, device)

# Preview each generated latent as an animated GIF of panning renders.
for idx, latent in enumerate(latents):
    frames = decode_latent_images(xm, latent, cameras, rendering_mode=render_mode)
    display(gif_widget(frames))

# Export every latent as a mesh: .ply (binary geometry) and .obj (for use in
# Blender Studio later).
from shap_e.util.notebooks import decode_latent_mesh

for idx, latent in enumerate(latents):
    tri = decode_latent_mesh(xm, latent).tri_mesh()
    with open(f'example_mesh_{idx}.ply', 'wb') as ply_file:
        tri.write_ply(ply_file)
    with open(f'example_mesh_{idx}.obj', 'w') as obj_file:
        tri.write_obj(obj_file)
# Point-E: build the base model and the upsampler, load their pretrained
# checkpoints, and wrap both stages in a PointCloudSampler.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('creating base model...')
# 'base40M' — presumably the 40M-parameter image-conditional base model
# (larger variants exist in point-e) — TODO confirm against point-e configs.
base_name = 'base40M'
base_model = model_from_config(MODEL_CONFIGS[base_name], device)
base_model.eval()
base_diffusion = diffusion_from_config(DIFFUSION_CONFIGS[base_name])
print('creating upsample model...')
# Second stage: densifies the coarse cloud produced by the base model.
upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device)
upsampler_model.eval()
upsampler_diffusion = diffusion_from_config(DIFFUSION_CONFIGS['upsample'])
print('downloading base checkpoint...')
base_model.load_state_dict(load_checkpoint(base_name, device))
print('downloading upsampler checkpoint...')
upsampler_model.load_state_dict(load_checkpoint('upsample', device))
# Two-stage sampling: 1024 coarse points, then upsample to 4096 points total,
# with per-point R/G/B color channels.
sampler = PointCloudSampler(
device=device,
models=[base_model, upsampler_model],
diffusions=[base_diffusion, upsampler_diffusion],
num_points=[1024, 4096 - 1024],
aux_channels=['R', 'G', 'B'],
guidance_scale=[3.0, 3.0],
)
from google.colab import files

# Upload the multiview / orthographic-projection image to condition on.
uploaded = files.upload()

# IMPROVEMENT: use the uploaded file's own name instead of the hard-coded
# 'figure_all.jpg', so any .png/.jpg upload works. Falls back to the original
# name when the upload dialog returns nothing.
image_name = next(iter(uploaded), 'figure_all.jpg')
img = Image.open(image_name)

# Run the progressive point-cloud sampler; keep only the final sample batch.
# (The original also had a stray bare `img` expression here — a no-op outside
# a notebook cell tail — which has been removed.)
samples = None
for x in tqdm(sampler.sample_batch_progressive(batch_size=1, model_kwargs=dict(images=[img]))):
    samples = x

# Extract the first point cloud and preview it on a 3x3 grid of projections.
pc = sampler.output_to_point_clouds(samples)[0]
fig = plot_point_cloud(pc, grid_size=3, fixed_bounds=((-0.75, -0.75, -0.75), (0.75, 0.75, 0.75)))
import plotly.graph_objects as go

# Interactive 3D scatter view of the point cloud, coloring each point from its
# R/G/B channels. All three axes are hidden so only the cloud itself is shown.
point_colors = [
    'rgb({},{},{})'.format(r, g, b)
    for r, g, b in zip(pc.channels["R"], pc.channels["G"], pc.channels["B"])
]
scatter = go.Scatter3d(
    x=pc.coords[:, 0],
    y=pc.coords[:, 1],
    z=pc.coords[:, 2],
    mode='markers',
    marker=dict(size=2, color=point_colors),
)
hidden_axes = dict(
    xaxis=dict(visible=False),
    yaxis=dict(visible=False),
    zaxis=dict(visible=False),
)
fig_plotly = go.Figure(data=[scatter], layout=dict(scene=hidden_axes))
fig_plotly.show(renderer="colab")
from point_e.util.pc_to_mesh import marching_cubes_mesh

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Build and load the SDF model that converts a point cloud into a mesh.
print('creating SDF model...')
name = 'sdf'
model = model_from_config(MODEL_CONFIGS[name], device)
model.eval()
print('loading SDF model...')
model.load_state_dict(load_checkpoint(name, device))

import skimage.measure as measure

# Produce a mesh (with vertex colors) via marching cubes over the SDF.
mesh = marching_cubes_mesh(
    pc=pc,
    model=model,
    batch_size=4096,
    grid_size=32,  # increase to 128 for the resolution used in evals
    progress=True,
)

# BUG FIX: the original wrote PLY bytes into a file named 'figure_all.obj'.
# write_ply() emits PLY data regardless of the filename, so the result was a
# mislabelled file that .obj importers reject. Save with the correct .ply
# extension; point-e's TriMesh does not appear to provide an OBJ writer
# (TODO confirm) — convert the .ply to .obj in Blender/MeshLab if an OBJ
# file is required.
with open('figure_all.ply', 'wb') as f:
    mesh.write_ply(f)
Write a new Colab Python notebook for this request:
the user uploads a three-view (multiview) orthographic-projection image file (PNG, JPG, etc.), and the notebook loads the image to produce a 3D point cloud,
then writes the result to an .obj file.
# --- Environment setup (Colab shell/IPython magics) ---
# NOTE(review): both clone URLs are t.cn short links — presumably the shap-e
# and point-e GitHub repositories; verify what they resolve to before running.
! pip install plotly -q
!git clone https://t.cn/A6KTcqVE
%cd shap-e
!pip install -e .
!git clone https://t.cn/A6NRWmuS
#Enter the directory and install the requirements
# NOTE(review): this second `%cd shap-e` already runs from inside shap-e, so it
# descends into a nested directory (or fails) — confirm the intended path; the
# second clone likely needs its own `%cd <repo>` instead.
%cd shap-e
!pip install -e .
from PIL import Image
import torch
from tqdm.auto import tqdm
from point_e.diffusion.configs import DIFFUSION_CONFIGS, diffusion_from_config
from point_e.diffusion.sampler import PointCloudSampler
from point_e.models.download import load_checkpoint
from point_e.models.configs import MODEL_CONFIGS, model_from_config
from point_e.util.plotting import plot_point_cloud
#Implementation and Cooking the 3D models, import all the necessary libraries.
#%cd /content/shap-e
import torch
from shap_e.diffusion.sample import sample_latents
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
from shap_e.models.download import load_model, load_config
from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget
# Set the device to CUDA if available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the Shap-E models and weights:
#   xm         - latent -> renderable-3D decoder ("transmitter")
#   model      - text-conditional latent generator
#   diffusion  - diffusion process configuration
xm = load_model('transmitter', device=device)
model = load_model('text300M', device=device)
diffusion = diffusion_from_config(load_config('diffusion'))

# BUG FIX: the original referenced an undefined name `prompt` below, which
# raises NameError at sample time. Define the text prompt explicitly.
prompt = 'a 3d model'

# Generation hyperparameters.
batch_size = 1          # number of latents to generate; higher values take longer.
guidance_scale = 65.0   # classifier-free guidance strength; higher follows the prompt more closely.

latents = sample_latents(
    batch_size=batch_size,
    model=model,
    diffusion=diffusion,
    guidance_scale=guidance_scale,
    model_kwargs=dict(texts=[prompt] * batch_size),
    progress=True,
    clip_denoised=True,
    use_fp16=True,
    use_karras=True,
    karras_steps=64,
    sigma_min=1E-3,
    sigma_max=160,
    s_churn=0,
)
render_mode = 'stf'  # rendering mode passed to the latent-image decoder
size = 128           # pixel size of each render; larger values render slower
cameras = create_pan_cameras(size, device)

# Preview each generated latent as an animated GIF of panning renders.
for idx, latent in enumerate(latents):
    frames = decode_latent_images(xm, latent, cameras, rendering_mode=render_mode)
    display(gif_widget(frames))

# Export every latent as a mesh: .ply (binary geometry) and .obj (for use in
# Blender Studio later).
from shap_e.util.notebooks import decode_latent_mesh

for idx, latent in enumerate(latents):
    tri = decode_latent_mesh(xm, latent).tri_mesh()
    with open(f'example_mesh_{idx}.ply', 'wb') as ply_file:
        tri.write_ply(ply_file)
    with open(f'example_mesh_{idx}.obj', 'w') as obj_file:
        tri.write_obj(obj_file)
# Point-E: build the base model and the upsampler, load their pretrained
# checkpoints, and wrap both stages in a PointCloudSampler.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('creating base model...')
# 'base40M' — presumably the 40M-parameter image-conditional base model
# (larger variants exist in point-e) — TODO confirm against point-e configs.
base_name = 'base40M'
base_model = model_from_config(MODEL_CONFIGS[base_name], device)
base_model.eval()
base_diffusion = diffusion_from_config(DIFFUSION_CONFIGS[base_name])
print('creating upsample model...')
# Second stage: densifies the coarse cloud produced by the base model.
upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device)
upsampler_model.eval()
upsampler_diffusion = diffusion_from_config(DIFFUSION_CONFIGS['upsample'])
print('downloading base checkpoint...')
base_model.load_state_dict(load_checkpoint(base_name, device))
print('downloading upsampler checkpoint...')
upsampler_model.load_state_dict(load_checkpoint('upsample', device))
# Two-stage sampling: 1024 coarse points, then upsample to 4096 points total,
# with per-point R/G/B color channels.
sampler = PointCloudSampler(
device=device,
models=[base_model, upsampler_model],
diffusions=[base_diffusion, upsampler_diffusion],
num_points=[1024, 4096 - 1024],
aux_channels=['R', 'G', 'B'],
guidance_scale=[3.0, 3.0],
)
from google.colab import files

# Upload the multiview / orthographic-projection image to condition on.
uploaded = files.upload()

# IMPROVEMENT: use the uploaded file's own name instead of the hard-coded
# 'figure_all.jpg', so any .png/.jpg upload works. Falls back to the original
# name when the upload dialog returns nothing.
image_name = next(iter(uploaded), 'figure_all.jpg')
img = Image.open(image_name)

# Run the progressive point-cloud sampler; keep only the final sample batch.
# (The original also had a stray bare `img` expression here — a no-op outside
# a notebook cell tail — which has been removed.)
samples = None
for x in tqdm(sampler.sample_batch_progressive(batch_size=1, model_kwargs=dict(images=[img]))):
    samples = x

# Extract the first point cloud and preview it on a 3x3 grid of projections.
pc = sampler.output_to_point_clouds(samples)[0]
fig = plot_point_cloud(pc, grid_size=3, fixed_bounds=((-0.75, -0.75, -0.75), (0.75, 0.75, 0.75)))
import plotly.graph_objects as go

# Interactive 3D scatter view of the point cloud, coloring each point from its
# R/G/B channels. All three axes are hidden so only the cloud itself is shown.
point_colors = [
    'rgb({},{},{})'.format(r, g, b)
    for r, g, b in zip(pc.channels["R"], pc.channels["G"], pc.channels["B"])
]
scatter = go.Scatter3d(
    x=pc.coords[:, 0],
    y=pc.coords[:, 1],
    z=pc.coords[:, 2],
    mode='markers',
    marker=dict(size=2, color=point_colors),
)
hidden_axes = dict(
    xaxis=dict(visible=False),
    yaxis=dict(visible=False),
    zaxis=dict(visible=False),
)
fig_plotly = go.Figure(data=[scatter], layout=dict(scene=hidden_axes))
fig_plotly.show(renderer="colab")
from point_e.util.pc_to_mesh import marching_cubes_mesh

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Build and load the SDF model that converts a point cloud into a mesh.
print('creating SDF model...')
name = 'sdf'
model = model_from_config(MODEL_CONFIGS[name], device)
model.eval()
print('loading SDF model...')
model.load_state_dict(load_checkpoint(name, device))

import skimage.measure as measure

# Produce a mesh (with vertex colors) via marching cubes over the SDF.
mesh = marching_cubes_mesh(
    pc=pc,
    model=model,
    batch_size=4096,
    grid_size=32,  # increase to 128 for the resolution used in evals
    progress=True,
)

# BUG FIX: the original wrote PLY bytes into a file named 'figure_all.obj'.
# write_ply() emits PLY data regardless of the filename, so the result was a
# mislabelled file that .obj importers reject. Save with the correct .ply
# extension; point-e's TriMesh does not appear to provide an OBJ writer
# (TODO confirm) — convert the .ply to .obj in Blender/MeshLab if an OBJ
# file is required.
with open('figure_all.ply', 'wb') as f:
    mesh.write_ply(f)
✋热门推荐