This is a mind map for TA (Technical Artist) engineers, covering computer graphics, graphics/image APIs, game engines, in-house rendering engines, platform-specific topics, and more.
TA Engineer
Computer graphics
Graphics algorithms
Fisheye surround-view stitching
Implemented with OpenCV: obtain the projection (homography) matrices, undistort each camera image, then warp and blend the images together (see the shader sketch after the reference links below).
References
https://zhuanlan.zhihu.com/p/569025736 https://zhuanlan.zhihu.com/p/623855004 https://blog.csdn.net/sunnyrainflower/article/details/133522608 https://blog.csdn.net/wxr19890830/article/details/93172810 https://zhuanlan.zhihu.com/p/641968268
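A minimal fragment-shader sketch of the warp step, assuming the undistortion and the 3x3 homography for one camera have already been computed with OpenCV; uHomography, uSrcTex and the coordinate convention are illustrative names, not taken from the linked articles.

```glsl
#version 330 core
in vec2 vUV;                   // destination (bird's-eye view) coordinates in [0,1]
out vec4 fragColor;

uniform sampler2D uSrcTex;     // one undistorted camera image
uniform mat3 uHomography;      // maps destination coordinates to source coordinates

void main()
{
    vec3 p = uHomography * vec3(vUV, 1.0);   // homogeneous transform
    vec2 srcUV = p.xy / p.z;                 // perspective divide
    // Drop samples that fall outside the source image; a real stitcher would blend overlaps.
    if (any(lessThan(srcUV, vec2(0.0))) || any(greaterThan(srcUV, vec2(1.0))))
        fragColor = vec4(0.0);
    else
        fragColor = texture(uSrcTex, srcUV);
}
```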
Bézier curves
Drawing vector lines
References
https://blog.csdn.net/ryfdizuo/article/details/9719295
Rendering
PBR validation: https://www.shadertoy.com/view/4sSfzK
Rotation
Matrix rotation
Euler-angle rotation
Quaternion rotation
Pros and cons of the three representations, and how to convert between them
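A small GLSL sketch of the quaternion route for comparison with the matrix form; the unit quaternion is laid out as (x, y, z, w) and the function names are illustrative.

```glsl
// Rotate a vector by a unit quaternion q = (x, y, z, w):
// v' = v + 2 * cross(q.xyz, cross(q.xyz, v) + q.w * v)
vec3 rotateByQuat(vec4 q, vec3 v)
{
    return v + 2.0 * cross(q.xyz, cross(q.xyz, v) + q.w * v);
}

// Equivalent 3x3 rotation matrix (columns listed left to right).
mat3 quatToMat3(vec4 q)
{
    float x = q.x, y = q.y, z = q.z, w = q.w;
    return mat3(
        1.0 - 2.0*(y*y + z*z), 2.0*(x*y + z*w),       2.0*(x*z - y*w),       // column 0
        2.0*(x*y - z*w),       1.0 - 2.0*(x*x + z*z), 2.0*(y*z + x*w),       // column 1
        2.0*(x*z + y*w),       2.0*(y*z - x*w),       1.0 - 2.0*(x*x + y*y)  // column 2
    );
}
```

Both give the same rotation; the quaternion avoids gimbal lock and interpolates well (slerp), while the matrix form is what the GPU ultimately consumes.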
AABB
Basic primitives
Geometry
Explicit geometry
Circles, rectangles and other shapes are drawn by assembling triangles.
Implicit geometry (math visualization)
Primary sources: The Book of Shaders https://github.com/patriciogonzalezvivo/thebookofshaders; math visualization https://www.bilibili.com/video/BV1YX4y1z7Zy/?spm_id_from=pageDriver&vd_source=090943f493e4c3d7363c03111e73bdb8
Signed distance fields (SDF)
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    vec2 uv = fragCoord / iResolution.xy;
    float ratio = iResolution.y / iResolution.x; // unify the x/y scale so the shape keeps its proportions
    uv = vec2(uv.x, uv.y * ratio);
    // remap uv from 0..1 to -1..1
    uv = uv * 2.0 - 1.0;
    // Euclidean distance
    float dist = sqrt(uv.x * uv.x + uv.y * uv.y) * 0.9;
    // Manhattan (taxicab) distance: |x| + |y| between the two points
    // float dist = (abs(uv.x) + abs(uv.y)) * 0.6;
    vec4 finalColor = vec4(dist, dist, dist, 1.0);
    fragColor = finalColor;
}
Various shapes
SDF rectangle derivation: https://www.cnblogs.com/peixu/p/17703571.html; primary source for SDF shape formulas: https://iquilezles.org/articles/distfunctions2d/; more shapes: https://blog.csdn.net/qw8704149/article/details/1214127
Ray marching (RayMarching/RayCast)
https://www.bilibili.com/video/BV1X7411F744?p=13&vd_source=090943f493e4c3d7363c03111e73bdb8
3D implementation: camera, cast rays, march, detect hits
Ray-marching algorithm principle:

#version 330 core
in vec2 fragTexCoord;            // texture coordinates
out vec4 fragColor;              // output color

uniform sampler3D volumeTexture; // volume texture
uniform vec3 volumeSize;         // dimensions of the volume data
uniform vec3 cameraPosition;     // camera position

// Placeholder constants so the sketch compiles.
const int   MAX_ITERATIONS = 128;
const float EPSILON        = 0.001;
const float MAX_DISTANCE   = 100.0;

// Ray structure
struct Ray {
    vec3 origin;
    vec3 direction;
};

// Distance-field function: distance from a position to the nearest surface in the scene
float distanceField(vec3 position)
{
    // Implement the distance field here (spheres, boxes, ...).
    // Return the distance to the nearest surface, or a large value if nothing is hit.
    return length(position) - 1.0; // placeholder: a unit sphere at the origin
}

// Ray-marching function: trace the ray and compute the rendered color
vec4 rayMarching(Ray ray)
{
    float totalDistance = 0.0;
    vec3 currentPosition = ray.origin;
    for (int i = 0; i < MAX_ITERATIONS; i++)
    {
        // Evaluate the distance field at the current position
        float distance = distanceField(currentPosition);
        // Advance the position and accumulate the travelled distance
        currentPosition += ray.direction * distance;
        totalDistance += distance;
        // Terminate when close enough to a surface or after marching too far
        if (distance < EPSILON || totalDistance > MAX_DISTANCE)
        {
            break;
        }
    }
    // Compute the final color from the end position
    vec4 finalColor = vec4(0.0);
    if (totalDistance < MAX_DISTANCE)
    {
        // Sample the volume texture for the color value
        vec3 textureCoords = currentPosition / volumeSize;
        finalColor = texture(volumeTexture, textureCoords);
    }
    return finalColor;
}

void main()
{
    // Build the ray from the camera position through the current fragment
    Ray ray;
    ray.origin = cameraPosition;
    ray.direction = normalize(vec3(fragTexCoord - 0.5, 1.0)); // vec3 needed; assume the view points down +Z
    // March the ray
    vec4 color = rayMarching(ray);
    // Output the rendered color
    fragColor = color;
}
Implicit 3D modeling
March depth / normal computation
Beginner SDF demo: https://www.shadertoy.com/view/XXsGz4

//Some of the sources I used to construct this scene:
//Distance Functions (https://iquilezles.org/articles/distfunctions)
//https://www.reddit.com/r/twotriangles/comments/1hy5qy/tutorial_1_writing_a_simple_distance_field/

float sphere (vec3 rayPos, vec3 center, float radius) //sphere primitive with offset controls
{
    return length(rayPos - center) - radius;
}

float sdPlane( vec3 rayPos, vec4 n )
{
    n = normalize(n); // orientation of plane, n must be normalized!
    return dot(rayPos,n.xyz) + n.w;
}

float uberSphere(vec3 rayPos) //a more complex sphere shape using booleans
{
    rayPos -= vec3(0.0, sin(iTime * 2.0) + 2.0, 0.0); //bounce animation
    //sphere
    return sphere(rayPos, vec3(0.0, 0.0, 0.0), 2.0);
}

//the final assembled scene
float scene(vec3 rayPos)
{
    float dist_a = sphere(rayPos, vec3(-0.0, -0.0, 0.0), 3.0);
    float dist_b = sdPlane(rayPos, vec4(0.0, 1.0, 0.0, 1.0));
    float booleanFloor = max(dist_b, -dist_a); // this cuts a sphere into the plane
    //return min(uberSphere(rayPos),booleanFloor); //this combines the floor with our uber sphere for the final scene
    return min(uberSphere(rayPos),dist_b); // simplified the scene with this line. comment this out and uncomment the above line for some additional boolean logic
}

//normals
vec3 normal(vec3 rayPos)
{
    vec3 e = vec3(0.01, 0.0, 0.0);
    return normalize(vec3(scene(rayPos + e.xyy) - scene(rayPos - e.xyy),
                          scene(rayPos + e.yxy) - scene(rayPos - e.yxy),
                          scene(rayPos + e.yyx) - scene(rayPos - e.yyx)));
}

//fresnel
float fresnel(vec3 n, vec3 eye)
{
    return 1.0 - max(dot(n,eye), 0.0);
}

//Lambert Diffuse Lighting
float diffuse(vec3 normal, vec3 lightVector)
{
    return max(dot(normal, lightVector), 0.0);
}

//hard shadows. Now we march from the current rayPos/Surface toward the lightsource.
//If we hit an object in the scene then the surface point is in shadow and we return 0.0
float shadow(vec3 rayPos, vec3 lightDir, vec3 normal)
{
    float sVal = 1.0;  //initial shadow value. sVal gets multiplied with diffuse lighting
    float sEPS = 0.01; // our shadow epsilon/precision value
    vec3 ro = rayPos + (normal * sEPS); //reduces self-shadow artifacts since we are starting the march slightly above our surface
    vec3 rd = lightDir; // we are now marching from our surface to light source.
    float sDist; //initializing our shadow distance value
    for(int i = 0; i < 36; i++)
    {
        sDist = scene(ro); //comparing shadow ray position with our scene
        if(sDist < sEPS)
        {
            sVal = 0.0;
            break;
        }
        ro += rd * sDist;
    }
    return sVal;
}

//reflections
float reflection(vec3 eye, vec3 rayPos, vec3 n, vec3 lightDir)
{
    float rVal = 0.0; //reflection boolean value: 0 = miss, 1 = hit
    vec3 refVec = normalize(reflect(-eye, n)); //normalized reflection vector
    float rEPS = 0.01; //reflection EPSILON
    vec3 ro = rayPos + (n * rEPS); // starts marching slightly "above" our surface to lessen artifacts
    vec3 rd = refVec;
    float rDist; //initializing reflection distance value. This gets plugged into the scene function
    float rShadowVal = 1.0;
    for(int i = 0; i < 24; i++)
    {
        rDist = scene(ro);
        if(rDist < rEPS)
        {
            float rDiffuseVal = max(dot(normal(ro), lightDir), 0.0);
            if(rDiffuseVal > 0.0) //then we calculate reflection shadow ray
            {
                rShadowVal = shadow(ro, lightDir, normal(ro));
            }
            rVal = rDiffuseVal * rShadowVal;
            break;
        }
        ro += rd * rDist;
    }
    return rVal;
}

void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // color outs
    vec3 color = vec3(0.0, 0.0, 0.0);
    float alpha = 1.0;

    //Scene directional light
    vec3 lightPos = vec3( sin(iTime * 2.0) * 5.0, 3.0, cos(iTime * 2.0) * 5.0); //this makes the lightsource rotate around the scene center
    vec3 lightDir = normalize(lightPos - vec3(0.0, 0.0, 0.0)); //normalized light vector derived from lightPos. This vector is useful for shading and marching shadow rays

    //Normalized device coordinates and aspect correction
    vec2 uv = fragCoord.xy / iResolution.xy;
    uv = uv * 2.0 - 1.0; // remap range from 0...1 to -1...1
    float aspectRatio = iResolution.x / iResolution.y;
    uv.x *= aspectRatio; //aspect correction

    //Mouse values for navigation or other shenanigans. Normalized device coords and aspect correction to match UVs
    vec2 daMouse = iMouse.xy / iResolution.xy;
    daMouse = daMouse * 2.0 - 1.0;
    daMouse.x *= aspectRatio;

    // camera controls (horizontal mouse = rotate, vertical mouse = elevation)
    vec3 camControls;
    camControls.x = sin(daMouse.x * 2.0) * 5.0;
    camControls.y = (daMouse.y * 0.5 + 0.5) * 9.0;
    camControls.z = cos(daMouse.x * 2.0) * 5.0;

    //mapping camera to UV coordinates
    vec3 cameraOrigin = vec3(camControls); //cam controls
    vec3 cameraTarget = vec3(0.0, 0.0, 0.0);
    vec3 upVector = vec3(0.0, 1.0, 0.0);
    vec3 cameraDirection = normalize(cameraTarget - cameraOrigin);
    vec3 cameraRight = normalize(cross(upVector, cameraOrigin));
    vec3 cameraUp = cross(cameraDirection, -cameraRight); //negate cameraRight to flip properly?
    vec3 rayDir = normalize(cameraRight * uv.x + cameraUp * uv.y + cameraDirection);

    //Precision value used in the ray marching loop below. This number equals our "surface". If the distance returned from rayPos
    //to our scene function is less than this then we have "touched" our object and break out of the loop to do normals and lighting
    const float EPSILON = 0.01;

    //initial ray position per pixel. This is the value that gets marched forward and tested
    vec3 rayPos = cameraOrigin;
    float shadowVal = 1.0;

    for (int i = 0; i < 200; i++) // the larger the loop the more accurate/slower the render time
    {
        float dist = scene(rayPos); // plug current rayPos into our scene function
        if (dist < EPSILON) //then the ray has hit our surface so we calculate normals and lighting at this point
        {
            vec3 n = normal(rayPos);
            vec3 eye = normalize(cameraOrigin - rayPos);
            float diffuseVal = diffuse(n, lightDir);
            if(diffuseVal > 0.0) //then we calculate shadow ray
            {
                shadowVal = shadow(rayPos, lightDir, n);
            }
            float refVal = reflection(eye, rayPos, n, lightDir);
            float fresnelVal = fresnel(n, eye);
            color = vec3((diffuseVal * shadowVal) + (refVal * fresnelVal));
            break;
        }
        rayPos += dist * rayDir; //if nothing is hit we march forward and try again
    }

    fragColor = vec4(color, alpha); //final color output
}
Solving for SDF surface normals
How to compute the normal of a curved surface: https://zhuanlan.zhihu.com/p/351197501

float scene(vec3 rayPos)
{
    // float dist_a = sphere(rayPos, vec3(-0.0, -0.0, 0.0), 3.0);
    float dist_a = sdBox(rayPos, vec3(1.0, 1.0, 1.0));
    float dist_b = sdPlane(rayPos, vec4(0.0, 1.0, 0.0, 1.0));
    float booleanFloor = max(dist_b, -dist_a); // this cuts a sphere into the plane
    // return min(uberSphere(rayPos),booleanFloor); //this combines the floor with our uber sphere for the final scene
    return min(uberSphere(rayPos),dist_b); // simplified the scene with this line. comment this out and uncomment the above line for some additional boolean logic
}

//normals
vec3 normal(vec3 rayPos)
{
    vec3 e = vec3(0.01, 0.0, 0.0);
    return normalize(vec3(scene(rayPos + e.xyy) - scene(rayPos - e.xyy),
                          scene(rayPos + e.yxy) - scene(rayPos - e.yxy),
                          scene(rayPos + e.yyx) - scene(rayPos - e.yyx)));
}
Shadows: computing the SDF surface normal from the gradient
https://www.jianshu.com/p/d34afd45f28e
https://blog.csdn.net/weixin_44683202/article/details/128853058

vec3 estimateNormal(vec3 p)
{
    return normalize(vec3(
        sceneSDF(vec3(p.x + EPSILON, p.y, p.z)) - sceneSDF(vec3(p.x - EPSILON, p.y, p.z)),
        sceneSDF(vec3(p.x, p.y + EPSILON, p.z)) - sceneSDF(vec3(p.x, p.y - EPSILON, p.z)),
        sceneSDF(vec3(p.x, p.y, p.z + EPSILON)) - sceneSDF(vec3(p.x, p.y, p.z - EPSILON))
    ));
}
Basics
Euclidean distance
Manhattan distance
UV
Built-in functions
smoothstep
https://zhuanlan.zhihu.com/p/157758600
Swapping the order of the min/max arguments changes how the function ramps.

void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    const float scale = 5.0;
    vec2 uv = fragCoord / iResolution.xy;
    float ratio = iResolution.y / iResolution.x;
    uv = (uv * 2.0 - 1.0) * scale;
    // smoothstep(min, max): 0 -> min -> max -> 1; <= min returns 0, >= max returns 1
    // smoothstep(max, min): 1 -> min -> max -> 0; <= min returns 1, >= max returns 0
    // float yValue = 1.0 - smoothstep(0.0, 0.01, abs(uv.y/5.0));
    // float xValue = 1.0 - smoothstep(0.0, 0.01, abs(uv.x/5.0));
    // uv (1 -> 0 -> 1), value: 0 -> 0.01 -> 1 -> 0.01 -> 0
    // float value = smoothstep(0.01, 0.0, abs(uv.y/5.0));
    // uv (1 -> 0 -> 1), value: 1 -> 0.01 -> 0 -> 0.01 -> 1
    float value = smoothstep(0.01, 0.0, abs(uv.y/5.0)) + smoothstep(0.01, 0.0, abs(uv.x/5.0));
    fragColor = vec4(value, value, value, 1.0);
}
Polar coordinates (see the sketch below)
https://zhuanlan.zhihu.com/p/203377553 
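A minimal Shadertoy-style sketch of switching centered UV to polar coordinates (radius, angle); the flower pattern is only an illustration.

```glsl
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    vec2 uv = fragCoord / iResolution.xy * 2.0 - 1.0;  // remap to -1..1
    uv.x *= iResolution.x / iResolution.y;             // aspect correction
    float r = length(uv);                              // radius
    float a = atan(uv.y, uv.x);                        // angle in [-pi, pi]
    float petals = 0.5 + 0.25 * cos(a * 6.0);          // radius modulated by the angle
    float value = 1.0 - smoothstep(petals, petals + 0.01, r);
    fragColor = vec4(vec3(value), 1.0);
}
```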
Implicit shape transforms
Translate / rotate / scale (sketch after this list)
Random noise implementation
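A minimal sketch for the two items above: an implicit shape is moved or rotated by applying the inverse transform to the sample point, and a hash function gives a cheap pseudo-random value; sdBox is the standard 2D box SDF and the other names are illustrative.

```glsl
float sdBox(vec2 p, vec2 b)
{
    vec2 d = abs(p) - b;
    return length(max(d, 0.0)) + min(max(d.x, d.y), 0.0);
}

// Translate and rotate the shape by transforming the sample point with the inverse.
float transformedBox(vec2 p, vec2 offset, float angle)
{
    p -= offset;                          // inverse translation
    float c = cos(angle), s = sin(angle);
    p = mat2(c, -s, s, c) * p;            // rotate the point by -angle so the shape appears rotated by +angle
    return sdBox(p, vec2(0.4, 0.2));
}

// Cheap hash: pseudo-random value in [0,1) from a 2D coordinate, usable as a noise source.
float hash21(vec2 p)
{
    return fract(sin(dot(p, vec2(127.1, 311.7))) * 43758.5453);
}
```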
SDF shadows
Hard shadows
Soft shadows (see the sketch below)
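A sketch of the classic soft-shadow march popularized by Inigo Quilez, assuming a scene() distance function like the one in the demo code above; k controls how wide the penumbra is (smaller k = softer).

```glsl
float softShadow(vec3 ro, vec3 rd, float mint, float maxt, float k)
{
    float res = 1.0;
    float t = mint;
    for (int i = 0; i < 64 && t < maxt; i++)
    {
        float h = scene(ro + rd * t);
        if (h < 0.001) return 0.0;   // blocker hit: fully in shadow
        res = min(res, k * h / t);   // track the narrowest cone of free space toward the light
        t += h;
    }
    return clamp(res, 0.0, 1.0);
}
```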
Ambient occlusion
SSAO
Common AO algorithms
Shader references: https://thebookofshaders.com/07/?lan=ch https://github.com/patriciogonzalezvivo/thebookofshaders
Examples
Basic SDF with real-time reflection: https://www.shadertoy.com/view/4Xl3Dr
Real-time reflection principle
Compute multiple light bounces and accumulate the colors into the pixel: 1. march until a hit -> 2. take the hit point -> 3. compute the reflection direction -> 4. march again from the hit point along the reflection direction -> 5. bounce hit -> 6. accumulate the light contributions.

for (int i = 0; i < 260; i++) // the larger the loop the more accurate/slower the render time
{
    float dist = scene(rayPos); // plug current rayPos into our scene function
    if (dist < EPSILON) //then the ray has hit our surface so we calculate normals and lighting at this point
    {
        vec3 n = normal(rayPos);
        vec3 eye = normalize(cameraOrigin - rayPos);
        float diffuseVal = diffuse(n, lightDir);
        if(diffuseVal > 0.0) //then we calculate shadow ray
        {
            shadowVal = shadow(rayPos, lightDir, n);
        }
        float refVal = reflection(eye, rayPos, n, lightDir);
        float fresnelVal = fresnel(n, eye);
        vec3 matcolor = vec3(1.00, 1.00, 0.90);
        color = vec3((diffuseVal * shadowVal) * matcolor + (refVal * fresnelVal));
        break;
    }
    rayPos += dist * rayDir; //if nothing is hit we march forward and try again
}

//reflections
float reflection(vec3 eye, vec3 rayPos, vec3 n, vec3 lightDir)
{
    float rVal = 0.0; //reflection boolean value: 0 = miss, 1 = hit
    vec3 refVec = normalize(reflect(-eye, n)); //normalized reflection vector
    float rEPS = 0.01; //reflection EPSILON
    vec3 ro = rayPos + (n * rEPS); // starts marching slightly "above" our surface to lessen artifacts
    vec3 rd = refVec;
    float rDist; //initializing reflection distance value. This gets plugged into the scene function
    float rShadowVal = 1.0;
    for(int i = 0; i < 40; i++)
    {
        rDist = scene(ro);
        if(rDist < rEPS)
        {
            float rDiffuseVal = max(dot(normal(ro), lightDir), 0.0);
            if(rDiffuseVal > 0.0) //then we calculate reflection shadow ray
            {
                rShadowVal = shadow(ro, lightDir, normal(ro));
            }
            rVal = rDiffuseVal * rShadowVal;
            break;
        }
        ro += rd * rDist;
    }
    return rVal;
}
Image processing algorithms
Linear algebra
Vector operations
Addition, subtraction
Dot product
https://zhuanlan.zhihu.com/p/359975221
Cross product
https://zhuanlan.zhihu.com/p/359975221
Vector color operations
Matrix operations
Matrix multiplication
Inverse, transpose
Orthographic, perspective, shear, mirror
Matrix transforms
2D/3D rotation, translation, scaling
https://blog.csdn.net/JuniorChestnut/article/details/107818933
Matrix derivation
Properties
Affine transform
Matrix derivation
Properties
Perspective transform
Matrix derivation
Properties
https://zhuanlan.zhihu.com/p/641970591
Homogeneous coordinates
Purpose and significance (worked example below)
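A minimal worked example of why homogeneous coordinates matter: adding a third coordinate fixed at 1 turns 2D translation into a matrix multiply, so translation, rotation and scale all compose into a single matrix.

```latex
\begin{pmatrix} x' \\ y' \\ 1 \end{pmatrix}
=
\begin{pmatrix}
1 & 0 & t_x \\
0 & 1 & t_y \\
0 & 0 & 1
\end{pmatrix}
\begin{pmatrix} x \\ y \\ 1 \end{pmatrix}
=
\begin{pmatrix} x + t_x \\ y + t_y \\ 1 \end{pmatrix}
```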
Coordinate systems
Local space
World space
Camera space
Deriving the transform matrices (composition sketched below)
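As a sketch, the derived matrices compose right to left to take a vertex from local space to clip space (M = model, V = view, P = projection).

```latex
p_{\text{clip}} = P \, V \, M \, p_{\text{local}},
\qquad
M:\ \text{local} \to \text{world},\quad
V:\ \text{world} \to \text{camera},\quad
P:\ \text{camera} \to \text{clip}
```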
Primitive assembly
Triangle barycentric coordinates (interpolation formula after this group)
Rasterization
mipmap
Bilinear interpolation
Supersampling
UV coordinates
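For the barycentric-coordinates item above, a worked interpolation formula: the rasterizer interpolates any per-vertex attribute f (UV, color, depth) at a point P inside triangle ABC, where the S terms are sub-triangle areas.

```latex
f(P) = \alpha f(A) + \beta f(B) + \gamma f(C),
\qquad
\alpha = \frac{S_{PBC}}{S_{ABC}},\quad
\beta  = \frac{S_{PCA}}{S_{ABC}},\quad
\gamma = \frac{S_{PAB}}{S_{ABC}},\quad
\alpha + \beta + \gamma = 1
```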
Trigonometric functions
https://zhuanlan.zhihu.com/p/390928056
3D rendering
Ray tracing:
https://zhuanlan.zhihu.com/p/41269520
Material properties
https://blog.csdn.net/wolf96/article/details/51104060
PBR
Calculus
https://www.zhihu.com/tardis/zm/art/94592123?source_id=1005
Graphics and image APIs
OpenGL
GLSL
Common built-in functions: built-ins simplify the computation and usually perform better (comparison sketch below)
https://blog.csdn.net/qq_45477402/article/details/124799493
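An illustrative before/after sketch of the point above; both functions compute the same clamped Lambert term, but the built-ins are clearer and typically map to dedicated GPU instructions (the function names are made up for the example).

```glsl
float falloffManual(vec3 n, vec3 l)
{
    float d = n.x * l.x + n.y * l.y + n.z * l.z;   // hand-written dot product
    if (d < 0.0) d = 0.0;                          // hand-written clamp
    if (d > 1.0) d = 1.0;
    return d;
}

float falloffBuiltin(vec3 n, vec3 l)
{
    return clamp(dot(n, l), 0.0, 1.0);             // same result with built-ins
}
```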
Transforming vectors in UV space
https://blog.csdn.net/m0_67555362/article/details/127281364
Shader basics
https://juejin.cn/column/7053999897778847758
Syntax: Shader Card (GitHub)
Fragment shader programming
uv
Implicit geometry
SDF (signed distance field)
Ray marching (RayCast/Ray Marching)
Buffers
VAO/VBO/EBO/FBO
Fundamentals: graphics pipeline, state-machine model, texture sampling
Color: depth testing, stencil testing, blending, cubemaps
Lighting: Lambert, gamma correction, shadows, normal mapping, parallax mapping, HDR, bloom, deferred shading
PBR: rendering theory, PBR lighting, IBL lighting
Vulkan
DirectX
OpenCV
Game engines
Unreal Engine (source available)
Blueprints / C++
Material system
Particle system
Framework architecture / engine mechanisms
Engine API / components
Animation state machine
Physics simulation (collider/trigger/raycast, etc.)
UMG UI system
Unity3D
Material system
Particle system
C# scripting
Engine API / engine components
Rendering components
Transform components
Collider components
Rigidbody components
Lumberyard (CryEngine, open source)
PlayCanvas (open source)
Cocos3D (open source)
In-house rendering engines
Filament
Implementing an Animoji effect with the Filament engine
https://blog.csdn.net/kaelsass/article/details/117363508?spm=1001.2014.3001.5502
OGRE
o3d
Rendering engine
PBR rendering theory
Course: computer graphics, shaders
Functions: BRDF, BSDF, BTDF, dielectrics, conductors (metals), Blinn-Phong, Lambert, the D/G/F terms, bidirectional distribution functions, geometric occlusion, Fresnel, lighting with and without IBL. Rendering principles: human-eye imaging, perspective, light reflection, refraction, radiant flux, spherical integration.
PBR rendering theory
Techniques: ambient light, ambient occlusion, screen-space reflections, environment-map baking, path tracer, cubemap / spherical coordinates, reflection, refraction, linear RGB, LDR, HDR, gamma correction
https://zhuanlan.zhihu.com/p/407007915
Forward rendering, deferred rendering, G-buffer
PBR functions
PBR white paper
https://zhuanlan.zhihu.com/p/56967462
Output color = direct lighting contribution + ambient/environment lighting contribution.
BRDF (bidirectional reflectance distribution function): handles only the lit side and ignores subsurface scattering; suited to opaque materials.
BTDF (bidirectional transmittance distribution function): handles only the back-lit side and ignores subsurface scattering.
BSDF (bidirectional scattering [reflection + transmission] distribution function): handles both the lit and back-lit sides and ignores subsurface scattering; suited to highly transparent materials.
BSSRDF (bidirectional scattering-surface reflectance distribution function): handles both sides and accounts for subsurface scattering; suited to translucent materials such as clouds, jade, and milk.
(A Cook-Torrance BRDF sketch follows the reference links below.)
Basics: LearnOpenGL https://learnopengl-cn.github.io/07%20PBR/01%20Theory/ PBR formulas: https://gwb.tencent.com/community/detail/133238 BRDF: https://gwb.tencent.com/community/detail/121027 Rendering equation: https://gwb.tencent.com/community/detail/133238 GAMES101/202
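A minimal GLSL sketch of a Cook-Torrance direct-lighting BRDF (GGX normal distribution, Smith geometry, Schlick Fresnel) in the spirit of the LearnOpenGL chapter linked above; parameter names are illustrative and IBL is omitted.

```glsl
const float PI = 3.14159265359;

float D_GGX(vec3 N, vec3 H, float roughness)                 // normal distribution (D)
{
    float a2 = roughness * roughness * roughness * roughness;
    float NdotH = max(dot(N, H), 0.0);
    float d = NdotH * NdotH * (a2 - 1.0) + 1.0;
    return a2 / (PI * d * d);
}

float G_SchlickGGX(float NdotX, float roughness)             // geometric occlusion, one direction
{
    float r = roughness + 1.0;
    float k = (r * r) / 8.0;                                 // direct-lighting remapping
    return NdotX / (NdotX * (1.0 - k) + k);
}

float G_Smith(vec3 N, vec3 V, vec3 L, float roughness)       // geometry term (G)
{
    return G_SchlickGGX(max(dot(N, V), 0.0), roughness)
         * G_SchlickGGX(max(dot(N, L), 0.0), roughness);
}

vec3 F_Schlick(float cosTheta, vec3 F0)                      // Fresnel term (F)
{
    return F0 + (1.0 - F0) * pow(1.0 - cosTheta, 5.0);
}

vec3 cookTorrance(vec3 N, vec3 V, vec3 L, vec3 albedo,
                  float metallic, float roughness, vec3 radiance)
{
    vec3 H  = normalize(V + L);
    vec3 F0 = mix(vec3(0.04), albedo, metallic);             // dielectric base reflectance vs metal
    float D = D_GGX(N, H, roughness);
    float G = G_Smith(N, V, L, roughness);
    vec3  F = F_Schlick(max(dot(H, V), 0.0), F0);
    float NdotL = max(dot(N, L), 0.0);
    float NdotV = max(dot(N, V), 0.0);
    vec3 specular = (D * G * F) / max(4.0 * NdotV * NdotL, 0.001);
    vec3 kD = (1.0 - F) * (1.0 - metallic);                  // energy left over for diffuse
    return (kD * albedo / PI + specular) * radiance * NdotL; // direct light only, no IBL
}
```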
Light color
The meaning of multiplying colors vs. adding colors
https://blog.csdn.net/fchaiyan/article/details/119700957
Multiplying two light values represents scaling the light down (attenuation/filtering).
Adding two light values usually represents merging lights.
For example, green light (0,1,0) reflected at point A with RGB (0.3,0.5,0.2) becomes (0,0.5,0), and red light (1,0,0) reflected at the same point becomes (0.3,0,0); when both reach the eye they are merged, i.e. added. Likewise, in a shader the final color of an object = ambient + emissive + diffuse + specular. Multiplication usually stands for light intensity x reflectance (or reflectance x reflectance): the light is attenuated or its reflective behavior changes. A small sketch follows.
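A tiny sketch of the rule above: multiplication filters/attenuates light by the surface response, addition merges independent contributions (names are illustrative).

```glsl
vec3 shade(vec3 lightColor, vec3 albedo, float NdotL,
           vec3 ambient, vec3 emissive, vec3 specular)
{
    vec3 diffuse = lightColor * albedo * max(NdotL, 0.0); // multiply: light filtered by the surface
    return ambient + emissive + diffuse + specular;       // add: independent light terms merge
}
```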
Basic lighting
Lambert: https://gwb.tencent.com/community/detail/125867 GAMES101
Shadows
shadowmap
SSAO
glTF model PBR implementation
glTF viewer
Programming against the glTF format
Traditional empirical lighting models
Blinn-Phong lighting model
Color = specular + diffuse + ambient (see the sketch below)
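A minimal Blinn-Phong sketch matching the formula above; the 0.1 ambient factor and the parameter names are illustrative.

```glsl
vec3 blinnPhong(vec3 N, vec3 V, vec3 L, vec3 lightColor, vec3 albedo, float shininess)
{
    vec3 ambient  = 0.1 * albedo;                                      // ambient
    vec3 diffuse  = max(dot(N, L), 0.0) * albedo * lightColor;         // Lambert diffuse
    vec3 H        = normalize(L + V);                                  // half vector
    vec3 specular = pow(max(dot(N, H), 0.0), shininess) * lightColor;  // Blinn specular
    return ambient + diffuse + specular;
}
```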
Platform-specific
Android rendering mechanism
https://zhuanlan.zhihu.com/p/661027517 cnblogs.com/mysweetAngleBaby/p/15549126.html
享学课堂 course: Android rendering mechanism
GPU rendering principles
https://zhuanlan.zhihu.com/p/649971173
Android image-processing series: using GL on multiple threads https://blog.csdn.net/kaelsass/article/details/117358047
Skia rendering principles
CG technology
AR/VR/MR/Digital Human
Human-eye imaging: https://www.sohu.com/a/194800446_699545