十八,镜面IBL-打印预过滤环境贴图

news2024/11/26 0:38:40

前面打印了各个级别的hdr环境贴图,也能看到预过滤环境贴图,现在进行打印各个级别的预过滤环境贴图。

运行结果如下
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
代码如下:
#include <osg/TextureCubeMap>
#include <osg/TexGen>
#include <osg/TexEnvCombine>
#include <osgUtil/ReflectionMapGenerator>
#include <osgDB/ReadFile>
#include <osgViewer/Viewer>
#include <osg/NodeVisitor>
#include <osg/ShapeDrawable>
#include <osgGA/TrackballManipulator>
#include <osgDB/WriteFile>

// Vertex shader for the prefilter pass: forwards the object-space position
// ("aPos", bound to attribute location 1 in main()) to the fragment stage so
// the cube map can be sampled by direction.
// Fix: the blog formatting replaced the string-literal quotes with curly
// quotes, which does not compile; restored to plain double quotes.
static const char * vertexShader =
{
//"#version 120 core\n"
"in vec3 aPos;\n"
"varying vec3 localPos;\n"
"void main(void)\n"
"{\n"
"localPos = aPos;\n"
" gl_Position = ftransform();\n"
//"gl_Position = view * view * vec4(aPos,1.0);"
"}\n"
};

// Fragment shader for the specular-IBL prefilter pass (LearnOpenGL style):
// for each output direction N it GGX-importance-samples the environment cube
// map at the given roughness and accumulates an NdotL-weighted average.
// Fixes: the declaration was missing the '*' (a const char cannot hold a
// string); curly quotes restored to plain quotes; the markdown-eaten
// multiplication in ImportanceSampleGGX restored to "a*a".
static const char * psShader =
{
"varying vec3 localPos;\n"
"uniform samplerCube environmentMap;"
"uniform float roughness;"
"const float PI = 3.1415926;"
// Van der Corput radical inverse without bit operations (works on GLSL
// targets lacking integer bit ops).
"float VanDerCorpus(uint n, uint base) "
"{ "
" float invBase = 1.0 / float(base); "
" float denom = 1.0; "
" float result = 0.0; "
" for (uint i = 0u; i < 32u; ++i) "
" { "
" if (n > 0u) "
" { "
" denom = mod(float(n), 2.0); "
" result += denom * invBase; "
" invBase = invBase / 2.0; "
" n = uint(float(n) / 2.0); "
" } "
" } "
"return result; "
"} "
" "
"vec2 HammersleyNoBitOps(uint i, uint N) "
"{ "
" return vec2(float(i) / float(N), VanDerCorpus(i, 2u)); "
"} "
//"float RadicalInverse_Vdc(uint bits)\n"
//"{"
//"bits = (bits << 16u) | (bits >> 16u);"
//"bits = ((bits & 0x55555555u) << 1u ) | (bits & 0xAAAAAAAAu) >> 1u);"
//"bits = ((bits & 0x33333333u) << 2u ) | (bits & 0xCCCCCCCCu) >> 2u);"
//"bits = ((bits & 0x0F0F0F0Fu) << 4u ) | (bits & 0xF0F0F0F0u) >> 4u);"
//"bits = ((bits & 0x00FF00FFu) << 8u ) | (bits & 0xFF00FF00u) >> 8u);"
//"return float(bits) * 2.3283064365386963e-10;"
//"}"
//"vec2 Hammersley(uint i, uint N)"
//"{"
//"return vec2(float(i) / float(N), RadicalInverse_Vdc(i));"
//"}"
// GGX importance sampling: builds a half-vector H around N biased by
// roughness, in tangent space then rotated to world space.
"vec3 ImportanceSampleGGX(vec2 Xi, vec3 N, float roughness)"
"{"
"float a = roughness * roughness;"
"float phi = 2.0 * PI * Xi.x;"
"float cosTheta = sqrt((1.0 - Xi.y)/(1.0+(a*a-1.0) * Xi.y));"
"float sinTheta = sqrt(1.0 - cosTheta * cosTheta);"
"vec3 H;"
"H.x = cos(phi) * sinTheta;"
"H.y = sin(phi) * sinTheta;"
"H.z = cosTheta;"
"vec3 up = abs(N.z) < 0.999 ? vec3(0.0,0.0,1.0) : vec3(1.0,0.0,0.0);"
"vec3 tangent = normalize(cross(up,N));"
"vec3 bitangent = cross(N,tangent);"
"vec3 sampleVec = tangent * H.x + bitangent * H.y + N * H.z;"
"return normalize(sampleVec);"
"}"
"void main() "
"{ "
" vec3 N = normalize(localPos); "
" vec3 R = N; "
" vec3 V = R; "
" "
" const uint SAMPLE_COUNT = 1024u; "
" float totalWeight = 0.0; "
" vec3 prefilteredColor = vec3(0.0); "
" for (uint i = 0u; i < SAMPLE_COUNT; ++i) "
" { "
" vec2 Xi = HammersleyNoBitOps(i, SAMPLE_COUNT); "
" vec3 H = ImportanceSampleGGX(Xi, N, roughness); "
" vec3 L = normalize(2.0 * dot(V, H) * H - V); "
" "
" float NdotL = max(dot(N, L), 0.0); "
" if (NdotL > 0.0) "
" { "
" prefilteredColor += texture(environmentMap, L).rgb * NdotL; "
" totalWeight += NdotL; "
" } "
" } "
" prefilteredColor = prefilteredColor / totalWeight; "
" "
" gl_FragColor = vec4(prefilteredColor, 1.0); "
"} "
};
class MyNodeVisitor : public osg::NodeVisitor
{
public:
MyNodeVisitor() : osg::NodeVisitor(osg::NodeVisitor::TRAVERSE_ALL_CHILDREN)
{

}
void apply(osg::Geode& geode)
{
	int count = geode.getNumDrawables();
	for (int i = 0; i < count; i++)
	{
		osg::ref_ptr<osg::Geometry> geometry = geode.getDrawable(i)->asGeometry();
		if (!geometry.valid())
		{
			continue;
		}
		osg::Array* vertexArray = geometry->getVertexArray();
		geometry->setVertexAttribArray(1, vertexArray);

	}
	traverse(geode);
}

};

osg::ref_ptrosg::TextureCubeMap getTextureCubeMap(osgViewer::Viewer& viewer,
int textureWidth,
int textureHeight)
{
unsigned int screenWidth, screenHeight;
osg::GraphicsContext::WindowingSystemInterface * wsInterface = osg::GraphicsContext::getWindowingSystemInterface();
wsInterface->getScreenResolution(osg::GraphicsContext::ScreenIdentifier(0), screenWidth, screenHeight);

osg::ref_ptr<osg::GraphicsContext::Traits> traits = new osg::GraphicsContext::Traits;
traits->x = 0;
traits->y = 0;
traits->width = screenWidth;
traits->height = screenHeight;
traits->windowDecoration = false;
traits->doubleBuffer = true;
traits->sharedContext = 0;
traits->readDISPLAY();
traits->setUndefinedScreenDetailsToDefaultScreen();

osg::ref_ptr<osg::GraphicsContext> gc = osg::GraphicsContext::createGraphicsContext(traits.get());
if (!gc)
{
	osg::notify(osg::NOTICE) << "GraphicsWindow has not been created successfully." << std::endl;
	return NULL;
}


osg::ref_ptr<osg::TextureCubeMap> texture = new osg::TextureCubeMap;

texture->setTextureSize(textureWidth, textureHeight);
texture->setInternalFormat(GL_RGB);
texture->setFilter(osg::Texture::MIN_FILTER, osg::Texture::LINEAR);
texture->setFilter(osg::Texture::MAG_FILTER, osg::Texture::LINEAR);
texture->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
texture->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
texture->setWrap(osg::Texture::WRAP_R, osg::Texture::CLAMP_TO_EDGE);

osg::Camera::RenderTargetImplementation renderTargetImplementation = osg::Camera::FRAME_BUFFER_OBJECT;
// front face
{

	osg::ref_ptr<osg::Camera> camera = new osg::Camera;
	camera->setName("Front face camera");
	camera->setGraphicsContext(gc.get());
	camera->setViewport(new osg::Viewport(0, 0, textureWidth, textureHeight));
	camera->setAllowEventFocus(false);
	camera->setRenderTargetImplementation(renderTargetImplementation);
	camera->setRenderOrder(osg::Camera::PRE_RENDER);
	//关联采样贴图
	camera->attach(osg::Camera::COLOR_BUFFER, texture, 0, osg::TextureCubeMap::POSITIVE_Y);

	osg::ref_ptr<osg::Image> printImage = new osg::Image;
	printImage->setFileName(camera->getName());
	printImage->allocateImage(textureWidth, textureHeight, 1, GL_RGBA, GL_UNSIGNED_BYTE);
	texture->setImage(0, printImage);
	camera->attach(osg::Camera::COLOR_BUFFER, printImage);
	viewer.addSlave(camera.get(), osg::Matrixd(), osg::Matrixd());
}


// top face
{
	osg::ref_ptr<osg::Camera> camera = new osg::Camera;
	camera->setName("Top face camera");
	camera->setGraphicsContext(gc.get());
	camera->setViewport(new osg::Viewport(0, 0, textureWidth, textureHeight));
	camera->setAllowEventFocus(false);
	camera->setRenderTargetImplementation(renderTargetImplementation);
	camera->setRenderOrder(osg::Camera::PRE_RENDER);
	//关联采样贴图
	camera->attach(osg::Camera::COLOR_BUFFER, texture, 0, osg::TextureCubeMap::POSITIVE_Z);
	osg::ref_ptr<osg::Image> printImage = new osg::Image;
	printImage->setFileName(camera->getName());
	printImage->allocateImage(textureWidth, textureHeight, 1, GL_RGBA, GL_UNSIGNED_BYTE);
	texture->setImage(1, printImage);
	camera->attach(osg::Camera::COLOR_BUFFER, printImage);
	viewer.addSlave(camera.get(), osg::Matrixd(), osg::Matrixd::rotate(osg::inDegrees(-90.0f), 1.0, 0.0, 0.0));
}

// left face
{
	osg::ref_ptr<osg::Camera> camera = new osg::Camera;
	camera->setName("Left face camera");
	camera->setGraphicsContext(gc.get());
	camera->setViewport(new osg::Viewport(0, 0, textureWidth, textureHeight));
	camera->setAllowEventFocus(false);
	camera->setRenderTargetImplementation(renderTargetImplementation);
	camera->setRenderOrder(osg::Camera::PRE_RENDER);
	//关联采样贴图
	camera->attach(osg::Camera::COLOR_BUFFER, texture, 0, osg::TextureCubeMap::NEGATIVE_X);
	osg::ref_ptr<osg::Image> printImage = new osg::Image;
	printImage->setFileName(camera->getName());
	printImage->allocateImage(textureWidth, textureHeight, 1, GL_RGBA, GL_UNSIGNED_BYTE);
	texture->setImage(2, printImage);
	camera->attach(osg::Camera::COLOR_BUFFER, printImage);
	viewer.addSlave(camera.get(), osg::Matrixd(), osg::Matrixd::rotate(osg::inDegrees(-90.0f), 0.0, 1.0, 0.0) * osg::Matrixd::rotate(osg::inDegrees(-90.0f), 0.0, 0.0, 1.0));
}

// right face
{
	osg::ref_ptr<osg::Camera> camera = new osg::Camera;
	camera->setName("Right face camera");
	camera->setGraphicsContext(gc.get());
	camera->setViewport(new osg::Viewport(0, 0, textureWidth, textureHeight));
	camera->setAllowEventFocus(false);
	camera->setRenderTargetImplementation(renderTargetImplementation);
	camera->setRenderOrder(osg::Camera::PRE_RENDER);
	//关联采样贴图
	camera->attach(osg::Camera::COLOR_BUFFER, texture, 0, osg::TextureCubeMap::POSITIVE_X);
	osg::ref_ptr<osg::Image> printImage = new osg::Image;
	printImage->setFileName(camera->getName());
	printImage->allocateImage(textureWidth, textureHeight, 1, GL_RGBA, GL_UNSIGNED_BYTE);
	texture->setImage(3, printImage);
	camera->attach(osg::Camera::COLOR_BUFFER, printImage);
	viewer.addSlave(camera.get(), osg::Matrixd(), osg::Matrixd::rotate(osg::inDegrees(90.0f), 0.0, 1.0, 0.0) * osg::Matrixd::rotate(osg::inDegrees(90.0f), 0.0, 0.0, 1.0));

}

// bottom face
{
	osg::ref_ptr<osg::Camera> camera = new osg::Camera;
	camera->setGraphicsContext(gc.get());
	camera->setName("Bottom face camera");
	camera->setViewport(new osg::Viewport(0, 0, textureWidth, textureHeight));
	camera->setAllowEventFocus(false);
	camera->setRenderTargetImplementation(renderTargetImplementation);
	camera->setRenderOrder(osg::Camera::PRE_RENDER);
	//关联采样贴图
	camera->attach(osg::Camera::COLOR_BUFFER, texture, 0, osg::TextureCubeMap::NEGATIVE_Z);
	osg::ref_ptr<osg::Image> printImage = new osg::Image;
	printImage->setFileName(camera->getName());
	printImage->allocateImage(textureWidth, textureHeight, 1, GL_RGBA, GL_UNSIGNED_BYTE);
	texture->setImage(4, printImage);
	camera->attach(osg::Camera::COLOR_BUFFER, printImage);
	viewer.addSlave(camera.get(), osg::Matrixd(), osg::Matrixd::rotate(osg::inDegrees(90.0f), 1.0, 0.0, 0.0) * osg::Matrixd::rotate(osg::inDegrees(180.0f), 0.0, 0.0, 1.0));

}

// back face
{
	osg::ref_ptr<osg::Camera> camera = new osg::Camera;
	camera->setName("Back face camera");
	camera->setGraphicsContext(gc.get());
	camera->setViewport(new osg::Viewport(0, 0, textureWidth, textureHeight));
	camera->setAllowEventFocus(false);
	camera->setRenderTargetImplementation(renderTargetImplementation);
	camera->setRenderOrder(osg::Camera::PRE_RENDER);
	//关联采样贴图
	camera->attach(osg::Camera::COLOR_BUFFER, texture, 0, osg::TextureCubeMap::NEGATIVE_Y);
	osg::ref_ptr<osg::Image> printImage = new osg::Image;
	printImage->setFileName(camera->getName());
	printImage->allocateImage(textureWidth, textureHeight, 1, GL_RGBA, GL_UNSIGNED_BYTE);
	texture->setImage(5, printImage);
	camera->attach(osg::Camera::COLOR_BUFFER, printImage);
	viewer.addSlave(camera.get(), osg::Matrixd(), osg::Matrixd::rotate(osg::inDegrees(180.0f), 1.0, 0.0, 0.0));

}

viewer.getCamera()->setProjectionMatrixAsPerspective(90.0f, 1.0, 0.1, 10);



//viewer.getCamera()->setNearFarRatio(0.0001f);
return texture;

}

int main()
{
int level = 0; //0,1,2,3,4
int maxLevel = 4;
float roughness = level * 1.0 / maxLevel;

int textureWidth = 128;
int textureHeight = 128;
float ratio = std::pow(0.5, level);
int mipWidth = textureWidth * ratio;
int mipHeight = textureHeight * ratio;
std::string strDir = "e:/hdr/lod/" + std::to_string(level) + "/";

osg::ref_ptr<osg::TextureCubeMap> tcm = new osg::TextureCubeMap;
tcm->setFilter(osg::Texture::MIN_FILTER, osg::Texture::LINEAR);
tcm->setFilter(osg::Texture::MAG_FILTER, osg::Texture::LINEAR);
tcm->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
tcm->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
tcm->setWrap(osg::Texture::WRAP_R, osg::Texture::CLAMP_TO_EDGE);

std::string strImagePosX = strDir + "Right face camera.bmp";
osg::ref_ptr<osg::Image> imagePosX = osgDB::readImageFile(strImagePosX);
tcm->setImage(osg::TextureCubeMap::POSITIVE_X, imagePosX);
std::string strImageNegX = strDir + "Left face camera.bmp";
osg::ref_ptr<osg::Image> imageNegX = osgDB::readImageFile(strImageNegX);
tcm->setImage(osg::TextureCubeMap::NEGATIVE_X, imageNegX);

std::string strImagePosY = strDir + "Front face camera.bmp";;
osg::ref_ptr<osg::Image> imagePosY = osgDB::readImageFile(strImagePosY);
tcm->setImage(osg::TextureCubeMap::POSITIVE_Y, imagePosY);
std::string strImageNegY = strDir + "Back face camera.bmp";;
osg::ref_ptr<osg::Image> imageNegY = osgDB::readImageFile(strImageNegY);
tcm->setImage(osg::TextureCubeMap::NEGATIVE_Y, imageNegY);

std::string strImagePosZ = strDir + "Top face camera.bmp";
osg::ref_ptr<osg::Image> imagePosZ = osgDB::readImageFile(strImagePosZ);
tcm->setImage(osg::TextureCubeMap::POSITIVE_Z, imagePosZ);
std::string strImageNegZ = strDir + "Bottom face camera.bmp";
osg::ref_ptr<osg::Image> imageNegZ = osgDB::readImageFile(strImageNegZ);
tcm->setImage(osg::TextureCubeMap::NEGATIVE_Z, imageNegZ);

osg::ref_ptr<osg::Box> box = new osg::Box(osg::Vec3(0, 0, 0), 1);
osg::ref_ptr<osg::ShapeDrawable> drawable = new osg::ShapeDrawable(box);
osg::ref_ptr<osg::Geode> geode = new osg::Geode;
geode->addDrawable(drawable);
MyNodeVisitor nv;
geode->accept(nv);
osg::ref_ptr<osg::StateSet> stateset = geode->getOrCreateStateSet();
stateset->setTextureAttributeAndModes(0, tcm, osg::StateAttribute::OVERRIDE | osg::StateAttribute::ON);

//shader

osg::ref_ptr<osg::Shader> vs1 = new osg::Shader(osg::Shader::VERTEX, vertexShader);
osg::ref_ptr<osg::Shader> ps1 = new osg::Shader(osg::Shader::FRAGMENT, psShader);
osg::ref_ptr<osg::Program> program1 = new osg::Program;
program1->addShader(vs1);
program1->addShader(ps1);
program1->addBindAttribLocation("aPos", 1);

osg::ref_ptr<osg::Uniform> tex0Uniform = new osg::Uniform("environmentMap", 0);
stateset->addUniform(tex0Uniform);
osg::ref_ptr<osg::Uniform> roughnessUniform = new osg::Uniform("roughness", roughness);
stateset->addUniform(roughnessUniform);
stateset->setAttribute(program1, osg::StateAttribute::ON);

osgViewer::Viewer viewer;
osg::ref_ptr<osgGA::TrackballManipulator> manipulator = new osgGA::TrackballManipulator();
viewer.setCameraManipulator(manipulator);
osg::Vec3d newEye(0, 0, 0);
osg::Vec3 newCenter(0, 0, 0);
osg::Vec3 newUp(0, 1, 0);
manipulator->setHomePosition(newEye, newCenter, newUp);
osg::ref_ptr<osg::TextureCubeMap> textureCubeMap = getTextureCubeMap(viewer, mipWidth, mipHeight);
viewer.setSceneData(geode.get());

bool bPrinted = false;
while (!viewer.done())
{
	viewer.frame();
	if (!bPrinted)
	{
		bPrinted = true;
		int imageNumber = textureCubeMap->getNumImages();
		for (int i = 0; i < imageNumber; i++)
		{

			osg::ref_ptr<osg::Image> theImage = textureCubeMap->getImage(i);
			std::string strPrintName = "e:/hdr/Prefilter/" + std::to_string(level) + "/" + theImage->getFileName() + ".bmp";
			osgDB::writeImageFile(*theImage, strPrintName);
		}
	}
}
return 0;

}

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/1048395.html

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!

相关文章

智慧工地管理系统源码(电脑端+手机端+APP+SAAS云平台)

智慧工地是指通过一系列先进的技术手段，实现施工现场的智能化管理，其核心是利用物联网技术，将施工现场的各种设备、机械、材料、人员等进行全面实时地监测和控制，以实现资源的最优配置和高效利用。 一、概述 智慧工地管理系统…

Linux虚拟机无法联网

问题描述 Centos7，配置了静态IP后，无法联网 解决方式 虚拟机连接不上网络，解决办法_虚拟机连不上网络-CSDN博客 根据上面文章一步步做。 发现 在Windows的cmd中，可以ping通我的Linux虚拟机 但是，在虚拟机里 无法 …

第九章 动态规划 part12 309. 最佳买卖股票时机含冷冻期 714. 买卖股票的最佳时机含手续费

第五十一天| 第九章 动态规划 part12 309. 最佳买卖股票时机含冷冻期 714. 买卖股票的最佳时机含手续费 一、309. 最佳买卖股票时机含冷冻期 题目链接&#xff1a;https://leetcode.cn/problems/best-time-to-buy-and-sell-stock-with-cooldown/ 题目介绍&#xff1a; 给定一…

layui 树状控件tree优化

先上效果图&#xff1a; 我选的组件是这个&#xff1a; 动态渲染完后&#xff0c;分别在窗体加载完成&#xff0c;节点点击事件分别加入js&#xff1a; //侧边栏图标替换//layui-icon-subtraction$(function () {$(".layui-icon-file").addClass("backs&quo…

【广州华锐互动】鱼类授精繁殖VR虚拟仿真实训系统

随着科技的不断发展&#xff0c;虚拟现实技术在各个领域的应用越来越广泛。在养殖业中&#xff0c;VR技术可以帮助养殖户进行家鱼授精实操演练&#xff0c;提高养殖效率和繁殖成功率。本文将介绍利用VR开展家鱼授精实操演练的方法和应用。 首先&#xff0c;我们需要了解家鱼授精…

QT基础入门——认识与创建QT(一)

前言&#xff1a; 前面学了Linux的基础命令、系统编程、网络编程&#xff0c;对LInux的使用也有了一个简单的了解与认识&#xff0c;之后的学习就要用到 imx6ull_pro这款开发板进行学习了&#xff0c;所以在使用前还是决定把QT的基础知识学习一下&#xff0c;好在后面的linu…

GeoPandas 基本使用

1.GeoPandas是什么 geopandas是一个开源项目&#xff0c;它的目的是使得在Python下更方便的处理地理空间数据。geopandas扩展了pandas的数据类型&#xff0c;允许其在几何类型上进行空间操作。geopandas主要结合了pandas和shapely框架的能力。 shapely 有一个名为 geometry 的类…

【DTEmpower案例操作教程】智能数据挖掘

DTEmpower是由天洑软件自主研发的一款通用的智能数据建模软件&#xff0c;致力于帮助工程师及工科专业学生&#xff0c;利用工业领域中的仿真、试验、测量等各类数据进行挖掘分析&#xff0c;建立高质量的数据模型&#xff0c;实现快速设计评估、实时仿真预测、系统参数预警、设…

m4a怎么转换mp3?4个方法包教包会

m4a怎么转换mp3&#xff1f;M4A是一种备受欢迎的音频文件格式&#xff0c;通常用于存储高保真音频数据。它代表着“MPEG-4 Audio”扩展名&#xff0c;这意味着它属于基于MPEG-4标准的音频格式之一。M4A格式有着众多的优势。首先&#xff0c;它能够提供出色的音质&#xff0c;并…

html、css学习记录【uniapp前奏】

Html 声明&#xff1a;该学习笔记源于菜鸟自学网站&#xff0c;特此记录笔记。很多示例源于此官网&#xff0c;若有侵权请联系删除。 文章目录 Html声明&#xff1a; CSS 全称 Cascading Style Sheets&#xff0c;层叠样式表。是一种用来为结构化文档&#xff08;如 HTML 文档…

在微信小程序中跳转到另一个小程序(多种实现方式)

方式一&#xff1a; 配置要跳转的appid和小程序页面路径 wx.navigateToMiniProgram({appId: 目标小程序appid,path: 目标小程序页面路径,//develop开发版&#xff1b;trial体验版&#xff1b;release正式版envVersion: release, success(res) {// 打开成功console.log("跳…

【MATLAB源码-第38期】基于OFDM的块状导频和梳状导频误码率性能对比,不同信道估计方法以及不同调制方式对比。

1、算法描述 块状导频和梳状导频都是用于无线通信系统中信道估计的方法。 块状导频&#xff1a; 定义&#xff1a; 在频域上&#xff0c;块状导频是连续放置的一组导频符号。这意味着所有的导频符号都集中在一个短的时间段内发送。 优点&#xff1a; 对于时间选择性信道&#…

项目管理常用工具有哪些?

项目管理常用的工具有很多&#xff0c;以下是一些常见的工具&#xff1a; 1. 甘特图&#xff1a;甘特图是一种图形化的工具&#xff0c;用于展示项目的时间计划和任务进度。它可以清晰地显示项目中各个任务的开始时间、结束时间以及任务之间的依赖关系。 2. 工作分解结构&#…

ChatGPT必应联网功能正式上线

今日凌晨发现&#xff0c;ChatGPT又支持必应联网了&#xff01;虽然有人使用过newbing这个阉割版的联网GPT4&#xff0c;但官方版本确实更加便捷好用啊&#xff01; 尽管 ChatGPT 此前已经展现出了其他人工智能模型无可比拟的智能&#xff0c;但由于其训练数据的限制&#xff…

【AI视野·今日CV 计算机视觉论文速览 第256期】Thu, 28 Sep 2023

AI视野今日CS.CV 计算机视觉论文速览 Thu, 28 Sep 2023 Totally 96 papers &#x1f449;上期速览✈更多精彩请移步主页 Daily Computer Vision Papers SHACIRA: Scalable HAsh-grid Compression for Implicit Neural Representations Authors Sharath Girish, Abhinav Shriva…

视频直播美颜sdk与计算机视觉的奇妙结合

在数字时代&#xff0c;视频直播已经成为了人们分享生活、娱乐互动的重要方式之一。而随着社交媒体和在线直播平台的不断发展&#xff0c;用户们对于直播质量和体验提出了越来越高的要求。其中之一就是美颜效果。美颜不仅仅是为了矫正自身缺陷&#xff0c;它更是一种增强直播吸…

安卓玩机-----反编译apk 修改apk 去广告 去弹窗等操作中的一些常识

安卓机型app的编译与反编译 apk文件的简单说明与解析 -安卓修改apk apk的组成和编译 一 电脑端几种反编译apk工具操作步骤解析 前面几个博文有说明关于反编译apk和apk架构等有些常识.今天对以上做个补充。初学者记住一点。对于一个apk文件使用压缩软件7zip打开可以查看到文件…

Aruba CX交换机 初始化配置

文章目录 CX交换机使用type-c接口console管理口配置&#xff1a;更改时间更改/创建管理员密码接口vlan配置DHCP配置配置保存 CX交换机使用type-c接口console Aruba cx交换机 console速率 Serial &#xff1a;115200 ##初始化清空配置&#xff1a; 6300&#xff1a; erase all…

基于DTW算法的命令字识别

DTW算法介绍 DTW(Dynamic Time Warping)&#xff1a;按距离最近原则&#xff0c;构建两个序列之间的对应的关系&#xff0c;评估两个序列的相似性。 要求&#xff1a; 单向对应&#xff0c;不能回头&#xff1b;一一对应&#xff0c;不能有空&#xff1b;对应之后&#xff0…

【图文】IRRA:跨模态隐式关系推理与对齐 | CVPR2023

详细内容指路zhihu&#x1f449;CVPR2023 | IRRA论文阅读 摘要 Text-to-image Person Retrieval的目的是根据给定的文本描述查询确定目标个体。主要的挑战是学习把视觉和文本模态映射到一个公共的潜在空间里。之前的工作尝试通过利用单模态分开预训练来提取图像和文本特征来解…