Referring to the following Colab Python code, write a new Colab Python notebook for this request:
the user imports a three-view (multiview) orthographic-projection image file (PNG, JPG, etc.),
the image is loaded to produce a 3D point cloud, and the result is written to an .obj file.
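Point-E's image-conditioned sampler takes a single conditioning image, so if the three orthographic views arrive as separate files they can first be pasted side by side into one picture before sampling. A minimal sketch with PIL, under the assumption of three hypothetical filenames front.png, side.png and top.png (the combine_views helper is illustrative, not part of point-e or shap-e):

from PIL import Image

def combine_views(paths, height=256):
    # Resize each view to a common height and paste them side by side on a white canvas.
    views = [Image.open(p).convert('RGB') for p in paths]
    views = [v.resize((int(v.width * height / v.height), height)) for v in views]
    canvas = Image.new('RGB', (sum(v.width for v in views), height), 'white')
    x = 0
    for v in views:
        canvas.paste(v, (x, 0))
        x += v.width
    return canvas

# Hypothetical filenames; replace with the uploaded view images.
combined = combine_views(['front.png', 'side.png', 'top.png'])
combined.save('figure_all.jpg')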

!pip install plotly -q

# Clone and install point-e
!git clone https://t.cn/A6KTcqVE
%cd point-e
!pip install -e .
%cd ..

# Clone shap-e, enter the directory and install the requirements
!git clone https://t.cn/A6NRWmuS
%cd shap-e
!pip install -e .
%cd ..

from PIL import Image
import torch
from tqdm.auto import tqdm

# Alias point-e's diffusion_from_config so the shap-e import below does not shadow it.
from point_e.diffusion.configs import DIFFUSION_CONFIGS, diffusion_from_config as pointe_diffusion_from_config
from point_e.diffusion.sampler import PointCloudSampler
from point_e.models.download import load_checkpoint
from point_e.models.configs import MODEL_CONFIGS, model_from_config
from point_e.util.plotting import plot_point_cloud

# Implementation: import all the libraries needed to generate the 3D models with shap-e.
#%cd /content/shap-e
import torch

from shap_e.diffusion.sample import sample_latents
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
from shap_e.models.download import load_model, load_config
from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget

#set the device to cuda if available, otherwise to cpu.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

#load the models and weights.
xm = load_model('transmitter', device=device)
model = load_model('text300M', device=device)
diffusion = diffusion_from_config(load_config('diffusion'))

#generate the 3D models.
prompt = 'a 3D object'  # example text prompt for the shap-e text-to-3D model; replace as needed
batch_size = 1 # number of models to generate; higher values take longer.
guidance_scale = 65.0 # guidance strength; higher values make the output follow the prompt more closely.

latents = sample_latents(
    batch_size=batch_size,
    model=model,
    diffusion=diffusion,
    guidance_scale=guidance_scale,
    model_kwargs=dict(texts=[prompt] * batch_size),
    progress=True,
    clip_denoised=True,
    use_fp16=True,
    use_karras=True,
    karras_steps=64,
    sigma_min=1e-3,
    sigma_max=160,
    s_churn=0,
)

render_mode = 'stf' # rendering mode; 'nerf' is the alternative.
size = 128 # this is the size of the renders, higher values take longer to render.

cameras = create_pan_cameras(size, device)
for i, latent in enumerate(latents):
    images = decode_latent_images(xm, latent, cameras, rendering_mode=render_mode)
    display(gif_widget(images))

#save the 3D models as .ply and .obj files.
# Example of saving the latents as meshes.
from shap_e.util.notebooks import decode_latent_mesh

for i, latent in enumerate(latents):
    t = decode_latent_mesh(xm, latent).tri_mesh()
    with open(f'example_mesh_{i}.ply', 'wb') as f:  # three-dimensional geometric data of the model
        t.write_ply(f)
    with open(f'example_mesh_{i}.obj', 'w') as f:  # this file can be customized in Blender later
        t.write_obj(f)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print('creating base model...')
base_name = 'base40M'
base_model = model_from_config(MODEL_CONFIGS[base_name], device)
base_model.eval()
base_diffusion = pointe_diffusion_from_config(DIFFUSION_CONFIGS[base_name])

print('creating upsample model...')
upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device)
upsampler_model.eval()
upsampler_diffusion = pointe_diffusion_from_config(DIFFUSION_CONFIGS['upsample'])

print('downloading base checkpoint...')
base_model.load_state_dict(load_checkpoint(base_name, device))

print('downloading upsampler checkpoint...')
upsampler_model.load_state_dict(load_checkpoint('upsample', device))

sampler = PointCloudSampler(
    device=device,
    models=[base_model, upsampler_model],
    diffusions=[base_diffusion, upsampler_diffusion],
    num_points=[1024, 4096 - 1024],
    aux_channels=['R', 'G', 'B'],
    guidance_scale=[3.0, 3.0],
)

from google.colab import files
uploaded = files.upload()

# Load the image to condition on (the file just uploaded; originally hard-coded as 'figure_all.jpg').
img = Image.open(next(iter(uploaded)))

# Produce a sample from the model.
samples = None
for x in tqdm(sampler.sample_batch_progressive(batch_size=1, model_kwargs=dict(images=[img]))):
    samples = x

img  # display the conditioning image

pc = sampler.output_to_point_clouds(samples)[0]

fig = plot_point_cloud(pc, grid_size=3, fixed_bounds=((-0.75, -0.75, -0.75),(0.75, 0.75, 0.75)))

import plotly.graph_objects as go

fig_plotly = go.Figure(
    data=[
        go.Scatter3d(
            x=pc.coords[:, 0], y=pc.coords[:, 1], z=pc.coords[:, 2],
            mode='markers',
            marker=dict(
                size=2,
                color=['rgb({},{},{})'.format(r, g, b) for r, g, b in
                       zip(pc.channels["R"], pc.channels["G"], pc.channels["B"])],
            ),
        )
    ],
    layout=dict(
        scene=dict(
            xaxis=dict(visible=False),
            yaxis=dict(visible=False),
            zaxis=dict(visible=False),
        )
    ),
)

fig_plotly.show(renderer="colab")
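
The request also asks for the point cloud itself to be written out. The OBJ format can store a raw point cloud as vertex-only records (one "v x y z" line per point); a minimal sketch, assuming pc.coords is the (N, 3) coordinate array produced by point-e (the output filename is arbitrary):

# Write the raw point cloud as vertex-only OBJ records.
with open('point_cloud.obj', 'w') as f:
    for x, y, z in pc.coords:
        f.write(f'v {x} {y} {z}\n')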

from point_e.util.pc_to_mesh import marching_cubes_mesh

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print('creating SDF model...')
name = 'sdf'
model = model_from_config(MODEL_CONFIGS[name], device)
model.eval()

print('loading SDF model...')
model.load_state_dict(load_checkpoint(name, device))

import skimage.measure as measure

# Produce a mesh (with vertex colors)
mesh = marching_cubes_mesh(
    pc=pc,
    model=model,
    batch_size=4096,
    grid_size=32,  # increase to 128 for the resolution used in evals
    progress=True,
)

# Write the mesh to a PLY file first (point-e's TriMesh provides write_ply);
# the requested .obj file is produced from it below.
with open('figure_all.ply', 'wb') as f:
    mesh.write_ply(f)
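
The request asks for an .obj file, but point-e's TriMesh is saved above as PLY. One way to convert it, assuming the third-party trimesh package (install it with !pip install trimesh -q; it is not part of point-e or shap-e):

import trimesh

# Load the PLY written above and export it as OBJ.
trimesh.load('figure_all.ply').export('figure_all.obj')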

# Install the necessary packages first
!pip install plotly -q

# Clone the required repositories and install them
!git clone https://t.cn/A6KTcqVE
%cd point-e
!pip install -e .
%cd ..

!git clone https://t.cn/A6NRWmuS
%cd shap-e
!pip install -e .
%cd ..

# Imports (point-e's diffusion_from_config is aliased so the shap-e import does not shadow it)
from PIL import Image
import torch
from tqdm.auto import tqdm

from point_e.diffusion.configs import DIFFUSION_CONFIGS, diffusion_from_config as pointe_diffusion_from_config
from point_e.diffusion.sampler import PointCloudSampler
from point_e.models.download import load_checkpoint
from point_e.models.configs import MODEL_CONFIGS, model_from_config
from point_e.util.plotting import plot_point_cloud
from point_e.util.pc_to_mesh import marching_cubes_mesh

from shap_e.diffusion.sample import sample_latents
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
from shap_e.models.download import load_model, load_config
from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget, decode_latent_mesh

def generate_3d_model(image_path, prompt, render_mode='stf', size=128):
    # Load models and weights
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    xm = load_model('transmitter', device=device)
    model = load_model('text300M', device=device)
    diffusion = diffusion_from_config(load_config('diffusion'))

    # Generate 3D models
    batch_size = 1
    guidance_scale = 65.0

    latents = sample_latents(
        batch_size=batch_size,
        model=model,
        diffusion=diffusion,
        guidance_scale=guidance_scale,
        model_kwargs=dict(texts=[prompt] * batch_size),
        progress=True,
        clip_denoised=True,
        use_fp16=True,
        use_karras=True,
        karras_steps=64,
        sigma_min=1e-3,
        sigma_max=160,
        s_churn=0,
    )

    cameras = create_pan_cameras(size, device)
    for i, latent in enumerate(latents):
        images = decode_latent_images(xm, latent, cameras, rendering_mode=render_mode)
        display(gif_widget(images))

    # Save the 3D models as .ply and .obj files
    for i, latent in enumerate(latents):
        t = decode_latent_mesh(xm, latent).tri_mesh()
        with open(f'example_mesh_{i}.ply', 'wb') as f:
            t.write_ply(f)
        with open(f'example_mesh_{i}.obj', 'w') as f:
            t.write_obj(f)

    # Build the point-e models used to turn the conditioning image into a point cloud
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('creating base model...')
    base_name = 'base40M'
    base_model = model_from_config(MODEL_CONFIGS[base_name], device)
    base_model.eval()
    base_diffusion = pointe_diffusion_from_config(DIFFUSION_CONFIGS[base_name])

    print('creating upsample model...')
    upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device)
    upsampler_model.eval()
    upsampler_diffusion = pointe_diffusion_from_config(DIFFUSION_CONFIGS['upsample'])

    print('downloading base checkpoint...')
    base_model.load_state_dict(load_checkpoint(base_name, device))

    print('downloading upsampler checkpoint...')
    upsampler_model.load_state_dict(load_checkpoint('upsample', device))

    sampler = PointCloudSampler(
        device=device,
        models=[base_model, upsampler_model],
        diffusions=[base_diffusion, upsampler_diffusion],
        num_points=[1024, 4096 - 1024],
        aux_channels=['R', 'G', 'B'],
        guidance_scale=[3.0, 3.0],
    )

    # Load the image to condition on
    img = Image.open(image_path)

    # Produce a sample from the model
    samples = None
    for x in tqdm(sampler.sample_batch_progressive(batch_size=1, model_kwargs=dict(images=[img]))):
        samples = x

    # Convert the output to point clouds
    pc = sampler.output_to_point_clouds(samples)[0]

    # Plot the point cloud (plot_point_cloud returns a matplotlib figure)
    fig = plot_point_cloud(pc, grid_size=3, fixed_bounds=((-0.75, -0.75, -0.75), (0.75, 0.75, 0.75)))
    display(fig)

    # Generate the mesh with the point-e SDF model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('creating SDF model...')
    name = 'sdf'
    model = model_from_config(MODEL_CONFIGS[name], device)
    model.eval()

    print('loading SDF model...')
    model.load_state_dict(load_checkpoint(name, device))

    # Produce a mesh (with vertex colors)
    mesh = marching_cubes_mesh(
        pc=pc,
        model=model,
        batch_size=4096,
        grid_size=32,
        progress=True,
    )

    # Write the mesh to a PLY file first (point-e's TriMesh provides write_ply);
    # the requested .obj file is produced from it below.
    with open('output.ply', 'wb') as f:
        mesh.write_ply(f)
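    # Convert the PLY mesh to the requested .obj file. This assumes the third-party
    # trimesh package is installed (!pip install trimesh -q); it is not part of
    # point-e or shap-e, and mirrors the conversion shown after the first mesh export.
    import trimesh
    trimesh.load('output.ply').export('output.obj')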

# Example usage
image_path = 'path/to/image.jpg'
prompt = 'example prompt'
generate_3d_model(image_path, prompt)
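
In Colab the image path usually comes from an upload widget rather than a hard-coded path. A short usage sketch (the prompt string is only an illustration):

from google.colab import files

uploaded = files.upload()            # pick the multiview projection image
image_path = next(iter(uploaded))    # filename of the uploaded file
generate_3d_model(image_path, prompt='a 3D object reconstructed from orthographic views')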
