Skip to content

Commit fe9c51b

Browse files
committed
Merge pull request BVLC#422 from sguada/threshold_layer
Threshold layer to binarize features. Added GPU code and tested.
2 parents 22bc596 + 79042d4 commit fe9c51b

File tree

5 files changed

+261
-1
lines changed

5 files changed

+261
-1
lines changed

include/caffe/neuron_layers.hpp

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -202,6 +202,37 @@ class TanHLayer : public NeuronLayer<Dtype> {
202202
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
203203
};
204204

205+
/* ThresholdLayer
206+
Outputs 1 if value in input is above threshold, 0 otherwise.
207+
The defult threshold = 0, which means positive values would become 1 and
208+
negative or 0, would become 0
209+
210+
y = 1 if x > threshold
211+
y = 0 if x <= threshold
212+
213+
y' = don't differenciable
214+
*/
215+
template <typename Dtype>
216+
class ThresholdLayer : public NeuronLayer<Dtype> {
217+
public:
218+
explicit ThresholdLayer(const LayerParameter& param)
219+
: NeuronLayer<Dtype>(param) {}
220+
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
221+
vector<Blob<Dtype>*>* top);
222+
223+
protected:
224+
virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
225+
vector<Blob<Dtype>*>* top);
226+
virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
227+
vector<Blob<Dtype>*>* top);
228+
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
229+
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
230+
NOT_IMPLEMENTED;
231+
}
232+
233+
Dtype threshold_;
234+
};
235+
205236
} // namespace caffe
206237

207238
#endif // CAFFE_NEURON_LAYERS_HPP_
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
// Copyright 2014 BVLC and contributors.

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

// Performs the common neuron-layer setup, then caches the configured
// threshold value for use in the forward passes.
template <typename Dtype>
void ThresholdLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  NeuronLayer<Dtype>::SetUp(bottom, top);
  threshold_ = this->layer_param_.threshold_param().threshold();
}

// Binarizes the input: out[i] = 1 when in[i] > threshold_, else 0.
template <typename Dtype>
Dtype ThresholdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const int num_elements = bottom[0]->count();
  const Dtype* in = bottom[0]->cpu_data();
  Dtype* out = (*top)[0]->mutable_cpu_data();
  for (int idx = 0; idx < num_elements; ++idx) {
    out[idx] = in[idx] > threshold_ ? Dtype(1) : Dtype(0);
  }
  return Dtype(0);
}

INSTANTIATE_CLASS(ThresholdLayer);

}  // namespace caffe
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
// Copyright 2014 BVLC and contributors.

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

// Element-wise binarization kernel: out[i] = 1 if in[i] > threshold, else 0.
template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    // Explicit Dtype casts keep this consistent with Forward_cpu.
    out[index] = in[index] > threshold ? Dtype(1) : Dtype(0);
  }
}

// GPU forward pass: launches one grid-sized batch of ThresholdForward
// over all elements of the bottom blob.
template <typename Dtype>
Dtype ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = (*top)[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, threshold_, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
  return Dtype(0);
}

INSTANTIATE_CLASS(ThresholdLayer);

}  // namespace caffe

src/caffe/proto/caffe.proto

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ message LayerParameter {
127127
// line above the enum. Update the next available ID when you add a new
128128
// LayerType.
129129
//
130-
// LayerType next available ID: 31 (last added: ARGMAX)
130+
// LayerType next available ID: 32 (last added: THRESHOLD)
131131
enum LayerType {
132132
// "NONE" layer type is 0th enum element so that we don't cause confusion
133133
// by defaulting to an existent LayerType (instead, should usually error if
@@ -163,6 +163,7 @@ message LayerParameter {
163163
SPLIT = 22;
164164
TANH = 23;
165165
WINDOW_DATA = 24;
166+
THRESHOLD = 31;
166167
}
167168
optional LayerType type = 5; // the layer type from the enum above
168169

@@ -175,6 +176,7 @@ message LayerParameter {
175176
repeated float weight_decay = 8;
176177

177178
// Parameters for particular layer types.
179+
// Parameters next available ID: 26 (last added: ThresholdParameter)
178180
optional ArgMaxParameter argmax_param = 23;
179181
optional ConcatParameter concat_param = 9;
180182
optional ConvolutionParameter convolution_param = 10;
@@ -191,6 +193,7 @@ message LayerParameter {
191193
optional PoolingParameter pooling_param = 19;
192194
optional PowerParameter power_param = 21;
193195
optional WindowDataParameter window_data_param = 20;
196+
optional ThresholdParameter threshold_param = 25;
194197

195198
// DEPRECATED: The layer parameters specified as a V0LayerParameter.
196199
// This should never be used by any code except to upgrade to the new
@@ -261,6 +264,11 @@ message EltwiseParameter {
261264
repeated float coeff = 2; // blob-wise coefficient for SUM operation
262265
}
263266

267+
// Message that stores parameters used by ThresholdLayer
message ThresholdParameter {
  // Inputs strictly greater than this value map to 1; all others map to 0.
  optional float threshold = 1 [default = 0];
}
271+
264272
// Message that stores parameters used by HDF5DataLayer
265273
message HDF5DataParameter {
266274
// Specify the data source.
Lines changed: 150 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,150 @@
1+
// Copyright 2014 BVLC and contributors.
2+
3+
#include <vector>
4+
5+
#include "cuda_runtime.h"
6+
#include "gtest/gtest.h"
7+
#include "caffe/blob.hpp"
8+
#include "caffe/common.hpp"
9+
#include "caffe/filler.hpp"
10+
#include "caffe/vision_layers.hpp"
11+
#include "caffe/test/test_gradient_check_util.hpp"
12+
13+
#include "caffe/test/test_caffe_main.hpp"
14+
15+
namespace caffe {
16+
17+
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
18+
19+
template <typename Dtype>
20+
class ThresholdLayerTest : public ::testing::Test {
21+
protected:
22+
ThresholdLayerTest()
23+
: blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
24+
blob_top_(new Blob<Dtype>()) {
25+
Caffe::set_random_seed(1701);
26+
// fill the values
27+
FillerParameter filler_param;
28+
GaussianFiller<Dtype> filler(filler_param);
29+
filler.Fill(this->blob_bottom_);
30+
blob_bottom_vec_.push_back(blob_bottom_);
31+
blob_top_vec_.push_back(blob_top_);
32+
}
33+
virtual ~ThresholdLayerTest() { delete blob_bottom_; delete blob_top_; }
34+
Blob<Dtype>* const blob_bottom_;
35+
Blob<Dtype>* const blob_top_;
36+
vector<Blob<Dtype>*> blob_bottom_vec_;
37+
vector<Blob<Dtype>*> blob_top_vec_;
38+
};
39+
40+
typedef ::testing::Types<float, double> Dtypes;
41+
TYPED_TEST_CASE(ThresholdLayerTest, Dtypes);
42+
43+
44+
TYPED_TEST(ThresholdLayerTest, TestSetup) {
  LayerParameter layer_param;
  ThresholdLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
  // A neuron layer's top blob must mirror the bottom blob's shape.
  EXPECT_EQ(this->blob_bottom_->num(), this->blob_top_->num());
  EXPECT_EQ(this->blob_bottom_->channels(), this->blob_top_->channels());
  EXPECT_EQ(this->blob_bottom_->height(), this->blob_top_->height());
  EXPECT_EQ(this->blob_bottom_->width(), this->blob_top_->width());
}
53+
54+
TYPED_TEST(ThresholdLayerTest, TestCPU) {
  Caffe::set_mode(Caffe::CPU);
  LayerParameter layer_param;
  ThresholdLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
  // Each output must be exactly 0 or 1 and agree with the default
  // threshold (0) applied to the corresponding input.
  const TypeParam threshold = layer_param.threshold_param().threshold();
  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
  const TypeParam* top_data = this->blob_top_->cpu_data();
  const int count = this->blob_bottom_->count();
  for (int i = 0; i < count; ++i) {
    EXPECT_GE(top_data[i], 0.);
    EXPECT_LE(top_data[i], 1.);
    if (top_data[i] == 0) {
      EXPECT_LE(bottom_data[i], threshold);
    } else if (top_data[i] == 1) {
      EXPECT_GT(bottom_data[i], threshold);
    }
  }
}
75+
76+
TYPED_TEST(ThresholdLayerTest, TestCPU2) {
  Caffe::set_mode(Caffe::CPU);
  // Configure a non-default threshold of 0.5 before building the layer.
  LayerParameter layer_param;
  ThresholdParameter* threshold_param =
      layer_param.mutable_threshold_param();
  threshold_param->set_threshold(0.5);
  ThresholdLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
  // Outputs must be binary and consistent with the configured threshold.
  const TypeParam threshold = layer_param.threshold_param().threshold();
  EXPECT_FLOAT_EQ(threshold, 0.5);
  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
  const TypeParam* top_data = this->blob_top_->cpu_data();
  const int count = this->blob_bottom_->count();
  for (int i = 0; i < count; ++i) {
    EXPECT_GE(top_data[i], 0.);
    EXPECT_LE(top_data[i], 1.);
    if (top_data[i] == 0) {
      EXPECT_LE(bottom_data[i], threshold);
    } else if (top_data[i] == 1) {
      EXPECT_GT(bottom_data[i], threshold);
    }
  }
}
101+
102+
TYPED_TEST(ThresholdLayerTest, TestGPU) {
  Caffe::set_mode(Caffe::GPU);
  LayerParameter layer_param;
  ThresholdLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
  // Check on the host that the GPU pass produced binary outputs
  // consistent with the default threshold (0).
  const TypeParam threshold = layer_param.threshold_param().threshold();
  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
  const TypeParam* top_data = this->blob_top_->cpu_data();
  const int count = this->blob_bottom_->count();
  for (int i = 0; i < count; ++i) {
    EXPECT_GE(top_data[i], 0.);
    EXPECT_LE(top_data[i], 1.);
    if (top_data[i] == 0) {
      EXPECT_LE(bottom_data[i], threshold);
    } else if (top_data[i] == 1) {
      EXPECT_GT(bottom_data[i], threshold);
    }
  }
}
123+
124+
TYPED_TEST(ThresholdLayerTest, TestGPU2) {
  Caffe::set_mode(Caffe::GPU);
  // Configure a non-default threshold of 0.5 before building the layer.
  LayerParameter layer_param;
  ThresholdParameter* threshold_param =
      layer_param.mutable_threshold_param();
  threshold_param->set_threshold(0.5);
  ThresholdLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
  // Check on the host that the GPU pass respects the configured threshold.
  const TypeParam threshold = layer_param.threshold_param().threshold();
  EXPECT_FLOAT_EQ(threshold, 0.5);
  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
  const TypeParam* top_data = this->blob_top_->cpu_data();
  const int count = this->blob_bottom_->count();
  for (int i = 0; i < count; ++i) {
    EXPECT_GE(top_data[i], 0.);
    EXPECT_LE(top_data[i], 1.);
    if (top_data[i] == 0) {
      EXPECT_LE(bottom_data[i], threshold);
    } else if (top_data[i] == 1) {
      EXPECT_GT(bottom_data[i], threshold);
    }
  }
}
149+
150+
} // namespace caffe

0 commit comments

Comments
 (0)