光线追踪:为球体制作平面



我正在做一个任务,我需要用平面(地板)对球体进行光线追踪。我有了球面,但我在平面上遇到了麻烦。我使用射线平面相交公式:t = -((o - p)·n) / (d·n),其中 o 是射线起点,p 是平面上一点,n 是平面法线,d 是射线方向。我在Plane.h中实现了这个公式,但是当我运行我的代码时,我从Ray.h中得到错误。有人能解释一下我哪里做错了吗?如有任何帮助,不胜感激。

Plane.h

#pragma once
#include "../raytrace/Ray.h"
class Plane
{
using Colour = cv::Vec3b; // RGB Value 
private:
Vec3 normal_;
Vec3 distance_;
Colour color_;
public:
Plane();
Plane(Vec3 norm, Vec3 dis, Colour color) : normal_(norm), distance_(dis), color_(color) {}
Vec3 norm() const {
return normal_;
}

Vec3 dis() const {
return distance_;
}

Colour color() const {
return color_;
}
float findIntersection(Ray ray) {
Vec3 rayDirection = ray.mPosition();
float denominator = rayDirection.dot(normal_);
if (denominator == 0) {
return false;
}
else {
//mPosition() is origin in Ray.h
float t = -(((ray.mPosition() - distance_)).dot(normal_)) / denominator;
}
}
};

Ray.h

#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <cmath>
#include "Image.h"

// Data types
typedef float Scalar; //**custom datatype: Scalar is float
typedef Eigen::Matrix<Scalar, 3, 1> Vec3; //***Vec3 is a custom datatype (specific kind)
typedef Eigen::Matrix<Scalar, 2, 1> Vec2;
typedef unsigned char uchar;
class Ray
{
private:
    Vec3 mPosition_; // ray origin (the eye/camera point)

public:
    Ray() {}
    // Construct a ray anchored at the given origin.
    Ray(Vec3 mPosition) : mPosition_(mPosition) {
    }

    // Ray parameter along the direction. The original left this
    // uninitialized while generateRay() read it — undefined behavior.
    float t = 0.0f;

    // Ray origin accessor.
    Vec3 mPosition() const {
        return mPosition_;
    }

    // Build the unit direction of the primary ray through image-plane point
    // `pt`: d = pt - origin, normalized.
    //
    // Fix over the original: it returned `pt + t * direction` with t
    // uninitialized (undefined behavior), and callers (main.cpp) use the
    // return value as a *direction*, not a point — so the normalized
    // direction itself is returned.
    inline Vec3 generateRay(Vec3 const& pt) {
        Vec3 direction = pt - mPosition_; // d = s - e, pt is the pixel position
        direction.normalize();
        return direction;
    }
};

main.cpp

#include <cmath>
#include "Image.h"
#include "Ray.h"
#include "../build/raytrace/Plane.h"
//Color functions
using Colour = cv::Vec3b; // RGB Value 
//Color is a Vec3b datatype, use Color instead of Vec3b, it has 3 vectors, hold 3 values b/w 0-255
// NOTE(review): cv::Vec3b pixels in OpenCV buffers are ordered B,G,R — if
// Image wraps a cv::Mat, Colour(255, 0, 0) renders as BLUE, which matches the
// "makes blue background colour" remark in main(). Confirm channel order.
Colour red() { return Colour(255, 0, 0); }
Colour green() { return Colour(0, 255,0); }
Colour blue() { return Colour(0, 0, 255); }
Colour white() { return Colour(255, 255, 255); }
Colour black() { return Colour(0, 0, 0); }

//bounding the channel wise pixel color between 0 to 255 
//bounding the color value, if a value is beyond 255 clamp it to 255, and any value below 0 clamp to 0.
// Bound a channel-wise pixel value to the displayable range [0, 255]:
// anything below 0 becomes 0, anything at or above 255 becomes 255.
uchar Clamp(int color)
{
    const int bounded = (color < 0) ? 0 : ((color > 255) ? 255 : color);
    return bounded;
}
// Renders a 500x500 ray-traced image: a diffusely shaded sphere in front of
// the camera, with a (currently commented-out) floor-plane intersection test.
int main(int, char**){
//Create an image object with 500 x 500 resolution.
Image image = Image(500, 500);

//Coordinates of image rectangle
Vec3 llc = Vec3(-1, -1, -1); //**llc - lower left corner
Vec3 urc = Vec3(1, 1, -1); //**urc - upper right corner
int width = urc(0) - llc(0);
int height = urc(1) - llc(1);
//Size of one pixel in world units (image-plane extent / pixel count).
Vec2 pixelUV = Vec2((float)width / image.cols, (float)height / image.rows);

/// TODO: define camera position (view point), sphere center, sphere radius (Weightage: 5%)
Vec3 CameraPoint = Vec3(0, 0, 0); //**it is the origin
Vec3 SphereCenter = Vec3(0, 0, -5); //**it is the Sphere Position
float SphereRadius = 2.0;

Vec3 LightSource = Vec3(2.0, 0.0, 3.0); //** point light position
Vec3 ambient = Vec3(0, 0, 0.5); //** per-channel ambient term added before clamping
Vec3 diffuse = Vec3(224, 180, 255); //** diffuse colour (0, 255, 100 would be green)
Vec3 Origin = CameraPoint;
//end

for (int row = 0; row < image.rows; ++row) {
for (int col = 0; col < image.cols; ++col) {
//TODO: Build primary rays  
//Find the pixel position (PixelPos) for each row and col and then construct the vector PixelPos-Origin

//World-space centre of the current pixel on the z = -1 image plane.
Vec3 pixelPos = Vec3(llc(0) + pixelUV(0) * (col + 0.5), llc(1) + pixelUV(1) * (row + 0.5), -1); 

//create a ray object
//NOTE(review): r is default-constructed, so its origin and its member t are
//uninitialized; generateRay() reads t, which is undefined behavior. The ray
//should be built as Ray(Origin) and generateRay should not depend on t.
Ray r; //**
//Vec3 rayDir = pixelPos - Origin; //**direction of the ray
Vec3 rayDir = r.generateRay(pixelPos); //**pixelPos-Origin
rayDir.normalize(); //**normalize the ray direction vector

//Ray-sphere intersection...(refer to the lecture slides and Section 4.4.1 of the textbook) 
//Quadratic coefficients of |o + t*d - c|^2 = r^2 in t.
float a = rayDir.dot(rayDir);
Vec3 s0_r0 = Origin - SphereCenter; //***s0_r0 = ray origin - sphere center
float b = 2.0 * rayDir.dot(s0_r0);
float c = s0_r0.dot(s0_r0) - pow(SphereRadius, 2);
//compute the discriminant

float discriminant = pow(b, 2) - 4 * a * c; 


//if the discriminant is greater than zero
if(discriminant > 0){


//find roots t1 and t2
float t1 = (-b - sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //** nearer root
float t2 = (-b + sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //** farther root
//determine which one is the real intersection point
float t; 
//Sphere s;
//NOTE(review): the `else { image = black(); }` further down pairs with THIS
//condition, so rays whose roots lie behind the camera (t1/t2 <= 0) are
//painted black instead of being treated as a miss — this looks like the
//nesting problem the author suspects in the comment below.
if (t1 < t2 && (t1 > 0 && t2 > 0)) {
t = t1;
//} //should this be after the if-statement below, so that it uses t = t1 and not just float t.????
if (t > 0) {
//Shade the pixel, normal is Intersection - SphereCenter, LightVector is LightSource- Intersection, make sure to normalize the vectors
Vec3 Intersection = Origin + (t * rayDir);
Vec3 Normal = Intersection - SphereCenter; //** normalize
Normal.normalize(); //**
Vec3 LightVector = LightSource - Intersection; //**normalize
LightVector.normalize(); //**
//Lambertian term, clamped at zero for surfaces facing away from the light.
float diffuseTerm = LightVector.dot(Normal);
if (diffuseTerm < 0) diffuseTerm = 0;
Colour colour(0, 0, 0); //The ambient base
colour[0] = Clamp(ambient[0] + diffuse[0] * diffuseTerm);
colour[1] = Clamp(ambient[1] + diffuse[1] * diffuseTerm);
colour[2] = Clamp(ambient[2] + diffuse[2] * diffuseTerm);
image(row, col) = colour;
}
}//

else {
image(row, col) = black();
}

} else {
//No intersection, discriminant < 0
//NOTE(review): red() is Colour(255, 0, 0); if Image stores cv::Vec3b in
//OpenCV's BGR order this renders BLUE — which matches the author's own
//observation below. Confirm the channel order used by Image.
image(row, col) = red(); //**makes blue background colour 
}
////**Plane intersection
//create a plane object
//NOTE(review): the plane is re-constructed every pixel and never used while
//the test below is commented out; also note findIntersection expects a Ray,
//but the commented call passes rayDir (a Vec3 direction).
Plane plane(Vec3(-5, 0, -4), Vec3(0, 0, -1), black()); 
//Plane plane;
////if ray hits plane -> color black
//if (plane.findIntersection(rayDir) == 1) {
//    image(row, col) = black();
//}
//else {
//    image(row, col) = white();
//}

}
}
/// Required outputs: (1) Ray traced image of a sphere (2) Ray traced image when the camera is placed inside the sphere (complete black)
image.save("./result.png");
image.display();
return EXIT_SUCCESS;
}

错误截图:[输入图片描述]

#include是一个非常简单的指令。它实际上只是复制粘贴文件的内容。

main.cpp同时包含Ray.h和Plane.h, Plane.h也包含Ray.h,所以Ray.h最终被包含两次。这就是为什么编译器会报错"类重定义"。

你可以在你所有的头文件的顶部添加#pragma once,让编译器知道如果它已经包含在文件中,就跳过它。

注意:#pragma once不是该语言的正式组成部分,但它被所有编译器支持,并且比其他选项有一些小优势。

最新更新