Caffe 精度问题:我为回归项目修改了 Accuracy 层之后,精度输出出现错误。修改后的 Accuracy 层代码如下:
// Count predictions whose absolute error from the label is within a
// fixed tolerance of 10. NOTE(review): `accuracy`, `outer_num_`,
// `inner_num_`, `dim`, `bottom_data`, `bottom_label`, and `Distance`
// are declared in the enclosing layer code (not shown here).
for (int i = 0; i < outer_num_; ++i) {
  for (int j = 0; j < inner_num_; ++j) {
    // sqrt(x * x) is just |x|; std::fabs is simpler and avoids the
    // duplicated subexpression.
    Distance = std::fabs(bottom_data[i * dim + j] - bottom_label[i * inner_num_ + j]);
    if (Distance <= 10) {
      ++accuracy;
    }
  }
}
,但结果是:
I1008 22:14:37.701171 102764 caffe.cpp:286] Loss: 70993.9 I1008 22:14:37.701171 102764 caffe.cpp:298] accuracy = -1.#IND
这是我的net.prototxt
:
# HDF5 data layer: reads "data" and "label" blobs from the HDF5 files
# listed in the source text file. TRAIN phase only.
layer {
name: "framert"
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
hdf5_data_param {
source: "G:/lab-zhang/caffe-windows/data/csv/train_data_list.txt"
batch_size: 10
}
}
# First fully-connected layer: data -> 50 hidden units.
layer {
name: "inner1"
type: "InnerProduct"
bottom: "data"
top: "inner1"
param {
# Weight learning-rate and decay multipliers.
lr_mult: 1
decay_mult: 1.5
}
param {
# Bias: double learning rate, no weight decay (common Caffe convention).
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 50
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
# Second fully-connected layer: 50 -> 1 scalar regression prediction.
layer {
name: "inner2"
type: "InnerProduct"
bottom: "inner1"
top: "inner2"
param {
lr_mult: 1
decay_mult: 1.0
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
# In-place ReLU applied to the final prediction. NOTE(review): this
# clamps the regression output to >= 0 — confirm that negative targets
# never occur, otherwise this ReLU should be moved between inner1 and
# inner2 (or removed).
layer {
name: "relu1"
type: "ReLU"
bottom: "inner2"
top: "inner2"
relu_param {
engine: CAFFE
}
}
# Accuracy layer, TEST phase only. NOTE(review): Caffe's stock Accuracy
# layer is a classification layer (argmax vs. integer label); for a
# 1-output regression it needs the custom code shown above — verify
# that the modified layer updates its count and writes the result to
# top[0] (see the answer below).
layer {
name: "accuracy"
type: "Accuracy"
bottom: "inner2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
# Euclidean (L2) loss between the scalar prediction and the label.
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "inner2"
bottom: "label"
top: "loss"
}
出现错误结果 accuracy = -1.#IND 的原因是什么?这是我的 net.prototxt
:
# HDF5 data layer: reads "data" and "label" blobs from the HDF5 files
# listed in the source text file. TRAIN phase only.
layer {
name: "framert"
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
hdf5_data_param {
source: "G:/lab-zhang/caffe-windows/data/csv/train_data_list.txt"
batch_size: 10
}
}
# First fully-connected layer: data -> 50 hidden units.
layer {
name: "inner1"
type: "InnerProduct"
bottom: "data"
top: "inner1"
param {
# Weight learning-rate and decay multipliers.
lr_mult: 1
decay_mult: 1.5
}
param {
# Bias: double learning rate, no weight decay (common Caffe convention).
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 50
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
# Second fully-connected layer: 50 -> 1 scalar regression prediction.
layer {
name: "inner2"
type: "InnerProduct"
bottom: "inner1"
top: "inner2"
param {
lr_mult: 1
decay_mult: 1.0
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
# In-place ReLU applied to the final prediction. NOTE(review): this
# clamps the regression output to >= 0 — confirm that negative targets
# never occur, otherwise this ReLU should be moved between inner1 and
# inner2 (or removed).
layer {
name: "relu1"
type: "ReLU"
bottom: "inner2"
top: "inner2"
relu_param {
engine: CAFFE
}
}
# Accuracy layer, TEST phase only. NOTE(review): Caffe's stock Accuracy
# layer is a classification layer (argmax vs. integer label); for a
# 1-output regression it needs the custom code shown above — verify
# that the modified layer updates its count and writes the result to
# top[0] (see the answer below).
layer {
name: "accuracy"
type: "Accuracy"
bottom: "inner2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
# Euclidean (L2) loss between the scalar prediction and the label.
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "inner2"
bottom: "label"
top: "loss"
}
您得到的 accuracy = -1.#IND 意味着您的代码计算出的值不是一个数字(NaN)。
从你发布的代码看不出为什么会得到 NaN。我怀疑你对 Accuracy 层代码改动过多,引入了一个导致 NaN 的 bug。确保你没有忘记更新 count,并确保你把计算出的精度写入 top[0]->mutable_cpu_data()[0]。
一般来说,最好不要覆盖现有的层,而是编写具有所需功能的新层。
在编写新层时,请遵循caffe wiki和这个git问题中的指导方针。具体来说,为您的层编写一个测试!