Summary
I am porting a simple ray tracing application based on the Scratchapixel version to a bunch of GPU libraries. I successfully ported it to CUDA using both the runtime API and the driver API, but it throws a Segmentation fault (core dumped) when I try to use the PTX compiled at runtime with NVRTC. If I uncomment the #include <math.h> directive at the beginning of the kernel file (see below), it still works when compiled with NVCC (the generated PTX is exactly the same), but it fails when compiled with NVRTC.
I want to know how to make NVRTC behave like NVCC (is that even possible?), or at least to understand the reason behind this issue.
Details
File kernel.cu (kernel source):
//#include <math.h>
#define MAX_RAY_DEPTH 5
template<typename T>
class Vec3
{
public:
    T x, y, z;
    __device__ Vec3() : x(T(0)), y(T(0)), z(T(0)) {}
    __device__ Vec3(T xx) : x(xx), y(xx), z(xx) {}
    __device__ Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {}
    __device__ Vec3& normalize()
    {
        T nor2 = length2();
        if (nor2 > 0) {
            T invNor = 1 / sqrt(nor2);
            x *= invNor, y *= invNor, z *= invNor;
        }
        return *this;
    }
    __device__ Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); }
    __device__ Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); }
    __device__ T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; }
    __device__ Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); }
    __device__ Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); }
    __device__ Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; }
    __device__ Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; }
    __device__ Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); }
    __device__ T length2() const { return x * x + y * y + z * z; }
    __device__ T length() const { return sqrt(length2()); }
};
typedef Vec3<float> Vec3f;
typedef Vec3<bool> Vec3b;

class Sphere
{
public:
    const char* id;
    Vec3f center;                      /// position of the sphere
    float radius, radius2;             /// sphere radius and radius^2
    Vec3f surfaceColor, emissionColor; /// surface color and emission (light)
    float transparency, reflection;    /// surface transparency and reflectivity
    int animation_frame;
    Vec3b animation_position_rand;
    Vec3f animation_position;
    Sphere(
        const char* id,
        const Vec3f &c,
        const float &r,
        const Vec3f &sc,
        const float &refl = 0,
        const float &transp = 0,
        const Vec3f &ec = 0) :
        id(id), center(c), radius(r), radius2(r * r), surfaceColor(sc),
        emissionColor(ec), transparency(transp), reflection(refl)
    {
        animation_frame = 0;
    }
    //[comment]
    // Compute a ray-sphere intersection using the geometric solution
    //[/comment]
    __device__ bool intersect(const Vec3f &rayorig, const Vec3f &raydir, float &t0, float &t1) const
    {
        Vec3f l = center - rayorig;
        float tca = l.dot(raydir);
        if (tca < 0) return false;
        float d2 = l.dot(l) - tca * tca;
        if (d2 > radius2) return false;
        float thc = sqrt(radius2 - d2);
        t0 = tca - thc;
        t1 = tca + thc;
        return true;
    }
};

__device__ float mix(const float &a, const float &b, const float &mixval)
{
    return b * mixval + a * (1 - mixval);
}

__device__ Vec3f trace(
    const Vec3f &rayorig,
    const Vec3f &raydir,
    const Sphere *spheres,
    const unsigned int spheres_size,
    const int &depth)
{
    float tnear = INFINITY;
    const Sphere* sphere = NULL;
    // find intersection of this ray with the sphere in the scene
    for (unsigned i = 0; i < spheres_size; ++i) {
        float t0 = INFINITY, t1 = INFINITY;
        if (spheres[i].intersect(rayorig, raydir, t0, t1)) {
            if (t0 < 0) t0 = t1;
            if (t0 < tnear) {
                tnear = t0;
                sphere = &spheres[i];
            }
        }
    }
    // if there's no intersection return black or background color
    if (!sphere) return Vec3f(2);
    Vec3f surfaceColor = 0; // color of the ray/surface of the object intersected by the ray
    Vec3f phit = rayorig + raydir * tnear; // point of intersection
    Vec3f nhit = phit - sphere->center; // normal at the intersection point
    nhit.normalize(); // normalize normal direction
    // If the normal and the view direction are not opposite to each other
    // reverse the normal direction. That also means we are inside the sphere so set
    // the inside bool to true. Finally reverse the sign of IdotN which we want
    // positive.
    float bias = 1e-4; // add some bias to the point from which we will be tracing
    bool inside = false;
    if (raydir.dot(nhit) > 0) nhit = -nhit, inside = true;
    if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) {
        float facingratio = -raydir.dot(nhit);
        // change the mix value to tweak the effect
        float fresneleffect = mix(pow(1 - facingratio, 3), 1, 0.1);
        // compute reflection direction (no need to normalize because all vectors
        // are already normalized)
        Vec3f refldir = raydir - nhit * 2 * raydir.dot(nhit);
        refldir.normalize();
        Vec3f reflection = trace(phit + nhit * bias, refldir, spheres, spheres_size, depth + 1);
        Vec3f refraction = 0;
        // if the sphere is also transparent compute refraction ray (transmission)
        if (sphere->transparency) {
            float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface?
            float cosi = -nhit.dot(raydir);
            float k = 1 - eta * eta * (1 - cosi * cosi);
            Vec3f refrdir = raydir * eta + nhit * (eta * cosi - sqrt(k));
            refrdir.normalize();
            refraction = trace(phit - nhit * bias, refrdir, spheres, spheres_size, depth + 1);
        }
        // the result is a mix of reflection and refraction (if the sphere is transparent)
        surfaceColor = (
            reflection * fresneleffect +
            refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
    }
    else {
        // it's a diffuse object, no need to raytrace any further
        for (unsigned i = 0; i < spheres_size; ++i) {
            if (spheres[i].emissionColor.x > 0) {
                // this is a light
                Vec3f transmission = 1;
                Vec3f lightDirection = spheres[i].center - phit;
                lightDirection.normalize();
                for (unsigned j = 0; j < spheres_size; ++j) {
                    if (i != j) {
                        float t0, t1;
                        if (spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) {
                            transmission = 0;
                            break;
                        }
                    }
                }
                surfaceColor += sphere->surfaceColor * transmission *
                    max(float(0), nhit.dot(lightDirection)) * spheres[i].emissionColor;
            }
        }
    }
    return surfaceColor + sphere->emissionColor;
}

extern "C" __global__
void raytrace_kernel(unsigned int width, unsigned int height, Vec3f *image, Sphere *spheres, unsigned int spheres_size, float invWidth, float invHeight, float aspectratio, float angle) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (y < height && x < width) {
        float xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
        float yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
        Vec3f raydir(xx, yy, -1);
        raydir.normalize();
        image[y*width+x] = trace(Vec3f(0), raydir, spheres, spheres_size, 0);
    }
}
I can compile it successfully with: nvcc --ptx kernel.cu -o kernel.ptx (here is the full PTX) and use that PTX in the driver API with cuModuleLoadDataEx, using the following snippet. It works as expected.
It works even if I uncomment the #include <math.h> line (in fact, the generated PTX is exactly the same).
CudaSafeCall( cuInit(0) );
CUdevice device;
CudaSafeCall( cuDeviceGet(&device, 0) );
CUcontext context;
CudaSafeCall( cuCtxCreate(&context, 0, device) );
unsigned int error_buffer_size = 1024;
std::vector<CUjit_option> options;
std::vector<void*> values;
char* error_log = new char[error_buffer_size];
options.push_back(CU_JIT_ERROR_LOG_BUFFER); //Pointer to a buffer in which to print any log messages that reflect errors
values.push_back(error_log);
options.push_back(CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES); //Log buffer size in bytes. Log messages will be capped at this size (including null terminator)
values.push_back(&error_buffer_size);
options.push_back(CU_JIT_TARGET_FROM_CUCONTEXT); //Determines the target based on the current attached context (default)
values.push_back(0); //No option value required for CU_JIT_TARGET_FROM_CUCONTEXT
CUmodule module;
CUresult status = cuModuleLoadDataEx(&module, ptxSource, options.size(), options.data(), values.data());
if (error_log && error_log[0]) { //https://stackoverflow.com/a/7970669/3136474
    std::cout << "Compiler error: " << error_log << std::endl;
}
CudaSafeCall( status );
However, whenever I try to compile this exact kernel with NVRTC (here is the full PTX), it compiles successfully, but gives me a Segmentation fault (core dumped) at the call to cuModuleLoadDataEx when I try to use the generated PTX.
If I uncomment the #include <math.h> line, it fails at the nvrtcCompileProgram call with the following output:
nvrtcSafeBuild() failed at cuda_raytracer_nvrtc_api.cpp:221 : NVRTC_ERROR_COMPILATION
Build log:
/usr/include/bits/mathcalls.h(177): error: linkage specification is incompatible with previous "isinf"
__nv_nvrtc_builtin_header.h(126689): here
/usr/include/bits/mathcalls.h(211): error: linkage specification is incompatible with previous "isnan"
__nv_nvrtc_builtin_header.h(126686): here
2 errors detected in the compilation of "kernel.cu".
The code I use to compile it with NVRTC is:
nvrtcProgram prog;
NvrtcSafeCall( nvrtcCreateProgram(&prog, kernelSource, "kernel.cu", 0, NULL, NULL) );
// https://docs.nvidia.com/cuda/nvrtc/index.html#group__options
std::vector<const char*> compilationOpts;
compilationOpts.push_back("--device-as-default-execution-space");
// NvrtcSafeBuild is a macro which automatically prints nvrtcGetProgramLog if the compilation fails
NvrtcSafeBuild( nvrtcCompileProgram(prog, compilationOpts.size(), compilationOpts.data()), prog );
size_t ptxSize;
NvrtcSafeCall( nvrtcGetPTXSize(prog, &ptxSize) );
char* ptxSource = new char[ptxSize];
NvrtcSafeCall( nvrtcGetPTX(prog, ptxSource) );
NvrtcSafeCall( nvrtcDestroyProgram(&prog) );
Then I simply load ptxSource using the previous snippet (note: that block of code is the same for both the driver API version and the NVRTC version).
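After loading, the module is used in the usual driver API way. Roughly like this (a minimal sketch, not the exact code from my project: d_image and d_spheres are assumed device buffers allocated with cuMemAlloc, and the remaining host variables mirror the kernel parameters):

CUfunction kernel;
CudaSafeCall( cuModuleGetFunction(&kernel, module, "raytrace_kernel") );

// kernel arguments, in the same order as the raytrace_kernel signature above
void* args[] = { &width, &height, &d_image, &d_spheres, &spheres_size,
                 &invWidth, &invHeight, &aspectratio, &angle };

unsigned int block_x = 16, block_y = 16;
unsigned int grid_x = (width + block_x - 1) / block_x;
unsigned int grid_y = (height + block_y - 1) / block_y;
CudaSafeCall( cuLaunchKernel(kernel, grid_x, grid_y, 1, block_x, block_y, 1,
                             0 /*sharedMemBytes*/, NULL /*stream*/, args, NULL) );
CudaSafeCall( cuCtxSynchronize() );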
Other things I have noticed / tried so far
- The PTX generated by NVCC and the PTX generated by NVRTC are very different, but I cannot understand them well enough to identify the possible problem
- Tried specifying the exact GPU architecture (CC 6.1 in my case) to the compiler; no difference (see the sketch right after this list)
- Tried disabling any compiler optimization (options --ftz=false --prec-sqrt=true --prec-div=true --fmad=false in nvrtcCompileProgram). The PTX file gets bigger, but it still segfaults
- Tried adding --std=c++11 or --std=c++14 to the NVRTC compiler options. With either of them, NVRTC generates an almost empty (4-line) PTX but issues no warning or error until I try to use it
Environment
- OS: Ubuntu 18.04.4 LTS 64-bit
- nvcc --version: Cuda compilation tools, release 10.1, V10.1.168. Built on Wed_Apr_24_19:10:27_PDT_2019
- gcc --version: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
- Hardware: Intel i7-7700HQ, GeForce GTX 1050 Ti
Edit on OP + 1 day
I forgot to add my environment; see the previous section.
"Can you also compile the nvrtc output with ptxas?" – comment by @talonmies
The PTX generated by nvcc compiles with a warning:
$ ptxas -o /tmp/temp_ptxas_output.o kernel.ptx
ptxas warning : Stack size for entry function 'raytrace_kernel' cannot be statically determined
This is due to the recursive kernel function (more). It can be safely ignored.
The PTX generated by nvrtc does not compile and emits an error:
$ ptxas -o /tmp/temp_ptxas_output.o nvrtc_kernel.ptx
ptxas fatal : Unresolved extern function '_Z5powiffi'
Based on this question, I added __device__ to the Sphere class constructor and removed the --device-as-default-execution-space compiler option. It now generates a slightly different PTX, but it still presents the same error.
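Concretely, only the constructor declaration changed (a sketch of just the modified member of the Sphere class shown above):

// constructor is now explicitly marked as device code instead of relying on
// the --device-as-default-execution-space option
__device__ Sphere(
    const char* id,
    const Vec3f &c,
    const float &r,
    const Vec3f &sc,
    const float &refl = 0,
    const float &transp = 0,
    const Vec3f &ec = 0) :
    id(id), center(c), radius(r), radius2(r * r), surfaceColor(sc),
    emissionColor(ec), transparency(transp), reflection(refl)
{
    animation_frame = 0;
}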
Compiling with #include <math.h> now also generates a lot of "A function without execution space annotations is considered a host function, and host functions are not allowed in JIT mode." warnings, in addition to the previous errors.
If I try to use the accepted solution of that question, it gives me a bunch of syntax errors and does not compile. NVCC still works perfectly.
Just found the culprit by the age-old comment-and-test method: the error goes away if I remove the pow call used to compute the Fresnel effect inside the trace method.
For now, I have simply replaced pow(var, 3) with var*var*var.
I created an MVCE and filed a bug report with NVIDIA: https://developer.nvidia.com/nvidia_bug/2917596.
Zhang answered and pointed out the problem:
The problem in your code is an incorrect option value passed to cuModuleLoadDataEx. In the lines:
options.push_back(CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES); //Log buffer size in bytes. Log messages will be capped at this size (including null terminator)
values.push_back(&error_buffer_size);
the buffer size option is provided, but instead of passing a value with the size, a pointer to that value is passed. Since this pointer is then read as a number, the driver assumed a buffer size much larger than 1024.
During the NVRTC compilation an "Unresolved extern function" error occurred, because the pow function signature, as you can see in the documentation, is:
__device__ double pow ( double x, double y )
The segfault happened when the driver tried to zero out the buffer while placing the error message into it.
Without the call to pow there was no compilation error, so the error buffer was never used and there was no segfault. To make sure the device code is correct, the values used to call the pow function and the output pointer should be doubles, or the float equivalent function powf can be used.
If I change that call to values.push_back((void*)error_buffer_size);, it reports the same error as the ptxas compilation of the generated PTX:
Compiler error: ptxas fatal : Unresolved extern function '_Z5powiffi'
cudaSafeCall() failed at file.cpp:74 : CUDA_ERROR_INVALID_PTX - a PTX JIT compilation failed
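Summarizing, the two fixes combined look roughly like this (a sketch: the cast through uintptr_t is my own addition to avoid int-to-pointer size warnings; the plain (void*)error_buffer_size cast from above works as well):

// host side: pass the log buffer size by value, not by address
options.push_back(CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES);
values.push_back((void*)(uintptr_t)error_buffer_size); // needs <cstdint>

// device side: stay in single precision so the PTX does not reference
// the unresolved double-precision pow()
float fresneleffect = mix(powf(1 - facingratio, 3.0f), 1, 0.1);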