Author: Zhe Huang (zhehuang@andrew.cmu.edu)
!which python
/home/zheh/anaconda3/envs/16889/bin/python
!pwd
/home/zheh/Documents/CMU_16_889_22spring/assignment1
# !pip install -q mediapy
import mediapy as media
import torch
import pytorch3d
import numpy as np
import imageio
import matplotlib.pyplot as plt
%matplotlib notebook
from starter.render_mesh import render_cow
# Render the default cow mesh and display the image inline.
plt.imshow(render_cow())
<matplotlib.image.AxesImage at 0x7f094a37e910>
from starter.render_mesh import render_cow_with_transform
def _cow_frame(azimuth):
    # One turntable frame: camera at distance 3, elevation 180, given azimuth.
    R, T = pytorch3d.renderer.look_at_view_transform(3, 180, azimuth)
    return (render_cow_with_transform(R=R, T=T) * 255).astype(np.uint8)

# 360-degree turntable of the cow, one frame every 5 degrees, saved as a GIF.
rendered_list = [_cow_frame(deg) for deg in range(0, 360, 5)]
imageio.mimsave('output/p_1_1.gif', rendered_list, fps=15)
# Load the GIF we just wrote and embed it in the notebook.
media.show_video(media.read_video('output/p_1_1.gif'), height=500, codec='gif')
from starter.dolly_zoom import dolly_zoom
# Run the dolly-zoom effect (writes output/dolly.gif), then display it.
dolly_zoom()
clip = media.read_video('output/dolly.gif')
media.show_video(clip, height=500, codec='gif')
from starter.render_mesh import render_tetrahedron_with_transform
def _tetrahedron_frame(azimuth):
    # One turntable frame: camera at distance 3, elevation 235, given azimuth.
    R, T = pytorch3d.renderer.look_at_view_transform(3, 235, azimuth)
    return (render_tetrahedron_with_transform(R=R, T=T) * 255).astype(np.uint8)

# Full 360-degree orbit of the tetrahedron in 5-degree steps.
tetrahedron_list = [_tetrahedron_frame(deg) for deg in range(0, 360, 5)]
imageio.mimsave('output/p_2_1.gif', tetrahedron_list, fps=15)
tetrahedron_gif = media.read_video('output/p_2_1.gif')
media.show_video(tetrahedron_gif, height=500, codec='gif')
It should have 4 vertices and 4 faces.
from starter.render_mesh import render_cube_with_transform
def _cube_frame(azimuth):
    # One turntable frame: camera at distance 4, elevation 135, given azimuth.
    R, T = pytorch3d.renderer.look_at_view_transform(4, 135, azimuth)
    return (render_cube_with_transform(R=R, T=T) * 255).astype(np.uint8)

# Full 360-degree orbit of the cube in 5-degree steps.
cube_list = [_cube_frame(deg) for deg in range(0, 360, 5)]
imageio.mimsave('output/p_2_2.gif', cube_list, fps=15)
cube_gif = media.read_video('output/p_2_2.gif')
media.show_video(cube_gif, height=500, codec='gif')
It should have 8 vertices and 12 (triangular) faces.
from starter.render_mesh import render_cow_with_retexture
def _retexture_frame(azimuth):
    # One turntable frame of the re-textured cow (distance 3, elevation 180).
    R, T = pytorch3d.renderer.look_at_view_transform(3, 180, azimuth)
    return (render_cow_with_retexture(R=R, T=T) * 255).astype(np.uint8)

# Full 360-degree orbit of the re-textured cow in 5-degree steps.
rendered_retexture_list = [_retexture_frame(deg) for deg in range(0, 360, 5)]
imageio.mimsave('output/p_3.gif', rendered_retexture_list, fps=15)
retexture_gif = media.read_video('output/p_3.gif')
media.show_video(retexture_gif, height=500, codec='gif')
I chose the same set of colors as in the demo, i.e. color1 = [0, 0, 1] (blue)
and color2 = [1, 0, 0] (red).
So R_relative
is an additional rotation matrix that rotates the camera clockwise about a given axis relative to its initial position. Similarly, T_relative
is an additional translation vector that moves the camera along the negative direction of the given axis (e.g. [1, 0, 0]
moves the camera 1 unit in the -X
direction) relative to its initial position.
from starter.camera_transforms import render_textured_cow
# Four relative camera poses to test; the two lists are kept parallel so
# they can be zipped pose-by-pose below.
R_relatives = [
    [[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
    [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
    [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
    [[0, 0, 1], [0, 1, 0], [-1, 0, 0]],
]
T_relatives = [
    [0, 0, 0],
    [0, 0, 3],
    [0.5, -0.5, 0],
    [-3, 0, 3],
]
# Render one textured cow per pose, labelled by the pose that produced it,
# and show all four in a 2-column grid.
textured_dict = {
    f'R={R}, T={T}': render_textured_cow(R_relative=R, T_relative=T)
    for R, T in zip(R_relatives, T_relatives)
}
media.show_images(textured_dict, height=500, columns=2)
R=[[0, 1, 0], [-1, 0, 0], [0, 0, 1]], T=[0, 0, 0] | R=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], T=[0, 0, 3] |
R=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], T=[0.5, -0.5, 0] | R=[[0, 0, 1], [0, 1, 0], [-1, 0, 0]], T=[-3, 0, 3] |
from starter.render_generic import render_plant_with_transform
def _plant_turntable(**render_kwargs):
    """Render a 360-degree turntable (5-degree steps) of a plant point cloud.

    Camera: distance 6, elevation 0, up vector flipped to (0, -1, 0).
    Extra keyword args (e.g. plant_num=2) are forwarded to
    render_plant_with_transform. Returns a list of uint8 frames suitable
    for imageio.mimsave.
    """
    frames = []
    for deg in range(0, 360, 5):
        R, T = pytorch3d.renderer.look_at_view_transform(6, 0, deg, up=((0, -1, 0),))
        rendered = render_plant_with_transform(R=R, T=T, **render_kwargs)
        frames.append((rendered * 255).astype(np.uint8))
    return frames

# First plant (renderer default), second plant, then their union.
rendered_plant_1_list = _plant_turntable()
imageio.mimsave('output/p_5_1_1.gif', rendered_plant_1_list, fps=15)
plant_1_gif = media.read_video('output/p_5_1_1.gif')

rendered_plant_2_list = _plant_turntable(plant_num=2)
imageio.mimsave('output/p_5_1_2.gif', rendered_plant_2_list, fps=15)
plant_2_gif = media.read_video('output/p_5_1_2.gif')

# Union via per-pixel minimum of the two frame sequences (assumes rendered
# points are darker than the background -- confirm against the renderer).
rendered_plant_union_list = [
    np.minimum(frame_1, frame_2)
    for frame_1, frame_2 in zip(rendered_plant_1_list, rendered_plant_2_list)
]
imageio.mimsave('output/p_5_1_3.gif', rendered_plant_union_list, fps=15)
plant_union_gif = media.read_video('output/p_5_1_3.gif')

plant_gifs = {
    'first image': plant_1_gif,
    'second image': plant_2_gif,
    'union': plant_union_gif,
}
media.show_videos(plant_gifs, height=325)
first image | second image | union |
from starter.render_generic import render_torus_with_transform
def _torus_frame(azimuth):
    # One turntable frame of the torus point cloud (distance 4, elevation 0).
    R, T = pytorch3d.renderer.look_at_view_transform(4, 0, azimuth)
    return (render_torus_with_transform(R=R, T=T) * 255).astype(np.uint8)

# Full 360-degree orbit of the parametric torus in 5-degree steps.
rendered_torus_list = [_torus_frame(deg) for deg in range(0, 360, 5)]
imageio.mimsave('output/p_5_2.gif', rendered_torus_list, fps=15)
torus_gif = media.read_video('output/p_5_2.gif')
media.show_video(torus_gif, height=500, codec='gif')
from starter.render_generic import render_torus_mesh_with_transform
def _torus_mesh_frame(azimuth):
    # One turntable frame of the torus mesh (distance 4, elevation 0).
    R, T = pytorch3d.renderer.look_at_view_transform(4, 0, azimuth)
    return (render_torus_mesh_with_transform(R=R, T=T) * 255).astype(np.uint8)

# Full 360-degree orbit of the implicit-surface torus mesh in 5-degree steps.
rendered_torus_mesh_list = [_torus_mesh_frame(deg) for deg in range(0, 360, 5)]
imageio.mimsave('output/p_5_3.gif', rendered_torus_mesh_list, fps=15)
torus_mesh_gif = media.read_video('output/p_5_3.gif')
media.show_video(torus_mesh_gif, height=500, codec='gif')
Discussion: rendering as a mesh definitely results in a higher-quality output, as there are fewer artifacts caused by insufficient sampling of point clouds. This is more noticeable when rendering complex objects such as a cow. However, rendering a mesh usually takes more memory and computation. The more values in the voxel grid, the more accurate the generated mesh will be and the more computation it will take. If the number of voxel values is insufficient, the resulting image may be quite distorted. On the other hand, rendering a point cloud is usually faster and more flexible, as the number of samples can be chosen from a wide range of values.
A bolt that does the tightening.
from starter.render_mesh import render_diy_with_transform
def _bolt_frame(azimuth):
    # One turntable frame of the DIY bolt mesh (distance 6, elevation 0).
    R, T = pytorch3d.renderer.look_at_view_transform(6, 0, azimuth)
    return (render_diy_with_transform(R=R, T=T) * 255).astype(np.uint8)

# Full 360-degree orbit of the bolt in 5-degree steps.
rendered_bolt_list = [_bolt_frame(deg) for deg in range(0, 360, 5)]
imageio.mimsave('output/p_6.gif', rendered_bolt_list, fps=15)
bolt_gif = media.read_video('output/p_6.gif')
media.show_video(bolt_gif, height=500, codec='gif')
from starter.render_mesh import get_cow_mesh, sample_mesh_to_pointcloud, render_mesh_with_transform
from starter.render_generic import render_pointcloud_with_transform
def _cow_turntable(render_fn, subject):
    """Render *subject* through a full 360-degree orbit in 5-degree steps.

    Camera: distance 3, elevation 180. *render_fn* is either
    render_mesh_with_transform or render_pointcloud_with_transform; it is
    called as render_fn(subject, R=R, T=T). Returns a list of uint8 frames
    suitable for imageio.mimsave.
    """
    frames = []
    for deg in range(0, 360, 5):
        R, T = pytorch3d.renderer.look_at_view_transform(3, 180, deg)
        frames.append((render_fn(subject, R=R, T=T) * 255).astype(np.uint8))
    return frames

mesh = get_cow_mesh()

# Baseline: the original cow mesh.
imageio.mimsave('output/p_7_0.gif', _cow_turntable(render_mesh_with_transform, mesh), fps=15)
orig_gif = media.read_video('output/p_7_0.gif')

# Point clouds sampled from the mesh at increasing densities. The file names
# (output/p_7_<n>.gif) and display labels ('<n> points') match the originals.
pointcloud_gifs = {'original mesh': orig_gif}
for n_samples in (10, 100, 1000, 10000):
    pc = sample_mesh_to_pointcloud(mesh, n_samples=n_samples)
    path = f'output/p_7_{n_samples}.gif'
    imageio.mimsave(path, _cow_turntable(render_pointcloud_with_transform, pc), fps=15)
    pointcloud_gifs[f'{n_samples} points'] = media.read_video(path)

media.show_videos(pointcloud_gifs, columns=3, height=325)
original mesh | 10 points | 100 points |
1000 points | 10000 points |