# JY, 드래그 복사 금지

ABOUT ME

-

Today
-
Yesterday
-
Total
-
  • c++ 로 구현하는 간단한 CNN
    공부/Digital Image Processing 2019. 12. 12. 01:46

    산학장학생 과제 관련하여

    C++로 OpenCV 코드를 짤 일이 생겼다.

     

    Classifier를 사용해야 하는데

    OpenCV에 잘 구현되어있는 내장함수들을 사용해

    Hog/Haar feature를 이용한 SVM등으로 Classification을 수행해도 되는 문제이지만

     

    그냥 개인적인 욕심으로

    다른 딥러닝 프레임워크를 사용하지 않고

    CPU환경에서 동작하는 CNN을 구현해보고 싶었다.

     

    기본적인 구조는 MATLAB의 방식을 따랐으며

    학습 또한 MATLAB으로 진행하고 parameter만 불러온다.

     

    가장 애를 먹었던 부분은

    MATLAB이 마지막 풀링 layer에서 FC layer로 어떤 방식으로 flatten하여 넘겨주는지 몰라서, 알아내느라 쩔쩔맸던 부분이다.

    MATLAB은 2D matrix를 flatten하는 방향을 row-wise기준으로 한다...

    ReLU layer는 구현은 해놨지만 그냥 Conv layer내에서 inplace연산으로 진행한다.

     

    모델의 구조를 미리 지정해주고,

    트레이닝된 웨이트만 불러와 분류작업을 수행할 수 있다.

     

    forward pass는 MATLAB과 결과가 동일함을 확인했다.

    아직 완성된건 아니고 수정할것도 많지만 기록을 위해 남겨둔다.

     

    레이어 클래스 선언 및 구현

    /* INPUT_LAYER */
    // Holds the network input as a channel-planar (dim-major) float buffer of
    // layer_row * layer_col * layer_dim elements, filled from an OpenCV Mat.
    class INPUT_LAYER {
    private:
    	int layer_row, layer_col, layer_dim;   // spatial size and channel count
    public:
    	/* layer: input buffer; mean: per-pixel training mean for zero-centering */
    	float* layer, * mean;
    	void init_layer(int row, int col, int dim);       // allocate zeroed input buffer
    	void load_mean(float* data);                      // copy the training-set mean image
    	void Mat_to_layer(Mat in_img);                    // raw copy, no normalization
    	void Mat_to_layer_Min_Max_Norm(Mat in_img);       // global min-max scaling to [0,1]
    	void Mat_to_layer_with_zerocenter(Mat in_img);    // subtract the loaded mean image
    };
    
    /* CONV_LAYER */
    // 2-D convolution (implemented as cross-correlation) with either "same"
    // (zero-padded) or "valid" output size, plus an optional fused in-place ReLU.
    class CONV_LAYER {
    private:
    	bool same_flag;                                   // true: padding:same, false: padding:valid
    	int layer_row, layer_col, layer_dim,
    		in_layer_row, in_layer_col,
    		kernel_row, kernel_col, kernel_indim, kernel_outdim;
    public:
    	float* layer, * w, * b;                           // output planes, weights, biases
    	// row/col are updated in place in "valid" mode so callers can chain layers.
    	void init_layer(int * row, int * col, int dim, bool padding_same);
    	void init_kernel(int k_row, int k_col, int k_indim, int kernel_outdim);
    	void load_kernel(float* w_data, float* b_data);   // copy externally trained parameters
    	void forward_pass(float* in_layer, bool relu_inplace);
    };
    
    /* ReLU_LAYER */
    // Standalone elementwise ReLU. In practice the conv layers fuse ReLU
    // in place (see forward_pass's relu_inplace flag), so this is optional.
    class ReLU_LAYER {
    private:
    	int layer_alldim;                 // total element count (row*col*dim)
    public:
    	float* layer;                     // output buffer
    	void init_layer(int alldim);
    	void forward_pass(float* in_layer);
    };
    
    /* FC_LAYER */
    // Fully-connected layer: layer = W * in_layer + b, optional fused ReLU.
    class FC_LAYER {
    private:
    	int in_layer_alldim, layer_alldim;   // input and output neuron counts
    public:
    	float* layer, * w, * b;              // output, weight matrix (row-per-neuron), bias
    	void init_layer(int in_alldim, int out_alldim);
    	void init_wb();                      // allocate zeroed weight/bias storage
    	void load_wb(float* w_data, float* b_data);
    	void forward_pass(float* in_layer, bool relu_inplace);
    	// Variant that flattens a pooled feature map MATLAB-style (column-major
    	// within each plane) before the dot product.
    	void forward_pass_after_pool(float* in_layer, bool relu_inplace);
    };
    
    /* MXPOOLING_LAYER */
    // Non-overlapping 2-D max pooling, per channel.
    class MXP_LAYER {
    private:
    	int layer_row, layer_col, layer_dim, pool_sz;     // output geometry + window size
    	int in_layer_row, in_layer_col, in_layer_dim;     // input geometry
    public:
    	float* layer;                                     // pooled output buffer
    	// in_row/in_col are updated in place to the pooled size for chaining.
    	void init_layer(int * in_row, int * in_col, int dim, int pool_size);
    	void forward_pass(float* in_layer);
    };
    
    /* SOFTMAX_LAYER */
    // Softmax over the final FC logits; output sums to 1.
    class SOFTMAX_LAYER {
    private:
    	int layer_alldim;                 // number of classes
    public:
    	float* layer;                     // class probabilities
    	void init_layer(int alldim);
    	void forward_pass(float* in_layer);
    };
    /* INPUT_LAYER */
    // Record the input geometry and allocate a zero-initialized buffer of
    // row * col * dim floats.
    void INPUT_LAYER::init_layer(int row, int col, int dim) {
    	layer_row = row;
    	layer_col = col;
    	layer_dim = dim;
    	layer = new float[row * col * dim]();
    }
    // Copy the training-set mean image (same layout and size as the input buffer).
    void INPUT_LAYER::load_mean(float* data) {
    	const int total = layer_row * layer_col * layer_dim;
    	mean = new float[total]();
    	for (int i = 0; i < total; i++) {
    		mean[i] = data[i];
    	}
    }
    // Copy the image into the channel-planar buffer (R plane, then G, then B).
    // NOTE(review): despite the original "zero center normalization" comment,
    // no normalization happens here — values are raw 0-255 channel intensities.
    void INPUT_LAYER::Mat_to_layer(Mat in_img) {
    	for (int row = 0; row < in_img.rows; row++) {
    		for (int col = 0; col < in_img.cols; col++) {
    			/* plain per-channel copy into planar layout */
    			layer[row * layer_col + col] =
    				(float)in_img.ptr<RGB>(row)[col].red;
    			layer[layer_row * layer_col + row * layer_col + col] =
    				(float)in_img.ptr<RGB>(row)[col].green;
    			layer[layer_row * layer_col * 2 + row * layer_col + col] =
    				(float)in_img.ptr<RGB>(row)[col].blue;
    		}
    	}
    }
    // Min-max normalize the image into [0,1] using the global min/max taken
    // over all three channels, storing the result in channel-planar layout.
    // BUGFIX: a constant image (max == min) previously divided by zero; the
    // range is now clamped to 1 so such an image maps to all zeros.
    void INPUT_LAYER::Mat_to_layer_Min_Max_Norm(Mat in_img) {
    	float r_val, g_val, b_val;
    	float min_val = 99999;
    	float max_val = 0;
    	/* pass 1: global min/max over R, G and B */
    	for (int row = 0; row < in_img.rows; row++) {
    		for (int col = 0; col < in_img.cols; col++) {
    			r_val = (float)in_img.ptr<RGB>(row)[col].red;
    			if (r_val < min_val) min_val = r_val;
    			if (r_val > max_val) max_val = r_val;
    			g_val = (float)in_img.ptr<RGB>(row)[col].green;
    			if (g_val < min_val) min_val = g_val;
    			if (g_val > max_val) max_val = g_val;
    			b_val = (float)in_img.ptr<RGB>(row)[col].blue;
    			if (b_val < min_val) min_val = b_val;
    			if (b_val > max_val) max_val = b_val;
    		}
    	}
    
    	/* guard against division by zero for a constant image */
    	float range = max_val - min_val;
    	if (range == 0) range = 1;
    
    	/* pass 2: scale each channel into [0,1] */
    	for (int row = 0; row < in_img.rows; row++) {
    		for (int col = 0; col < in_img.cols; col++) {
    			layer[row * layer_col + col] =
    				(((float)in_img.ptr<RGB>(row)[col].red - min_val) / range);
    			layer[layer_row * layer_col + row * layer_col + col] =
    				(((float)in_img.ptr<RGB>(row)[col].green - min_val) / range);
    			layer[layer_row * layer_col * 2 + row * layer_col + col] =
    				(((float)in_img.ptr<RGB>(row)[col].blue - min_val) / range);
    		}
    	}
    }
    // Zero-center the image: subtract the loaded per-pixel mean from each
    // channel and store in channel-planar layout. load_mean() must run first.
    void INPUT_LAYER::Mat_to_layer_with_zerocenter(Mat in_img) {
    	const int plane = layer_row * layer_col;   // elements per channel plane
    	for (int row = 0; row < in_img.rows; row++) {
    		RGB* px = in_img.ptr<RGB>(row);
    		for (int col = 0; col < in_img.cols; col++) {
    			const int idx = row * layer_col + col;
    			layer[idx] = (float)px[col].red - mean[idx];
    			layer[plane + idx] = (float)px[col].green - mean[plane + idx];
    			layer[2 * plane + idx] = (float)px[col].blue - mean[2 * plane + idx];
    		}
    	}
    }
    
    
    /* CONV_LAYER */
    // Configure the output size. "same" padding keeps the input spatial size;
    // "valid" shrinks it by 2 per dimension and writes the shrunken size back
    // through *in_row/*in_col so the caller can chain layer initializations.
    // NOTE(review): the -2 hard-codes a 3x3 kernel — confirm before using
    // other kernel sizes.
    void CONV_LAYER::init_layer(int * in_row, int * in_col, int dim, bool padding_same) {
    	same_flag = padding_same;
    	in_layer_row = *in_row;
    	in_layer_col = *in_col;
    	layer_dim = dim;
    	if (same_flag) {
    		layer_row = in_layer_row;
    		layer_col = in_layer_col;
    	}
    	else {
    		layer_row = in_layer_row - 2;
    		layer_col = in_layer_col - 2;
    		*in_row = layer_row;
    		*in_col = layer_col;
    	}
    	layer = new float[layer_row * layer_col * layer_dim]();
    }
    // Record kernel geometry and allocate zero-initialized weight/bias storage.
    void CONV_LAYER::init_kernel(int k_row, int k_col, int k_indim, int k_outdim) {
    	kernel_row = k_row;
    	kernel_col = k_col;
    	kernel_indim = k_indim;
    	kernel_outdim = k_outdim;
    	w = new float[k_row * k_col * k_indim * k_outdim]();
    	b = new float[k_outdim]();
    }
    // Copy externally trained weights and biases into this layer's buffers.
    void CONV_LAYER::load_kernel(float* w_data, float* b_data) {
    	const int w_count = kernel_row * kernel_col * kernel_indim * kernel_outdim;
    	for (int i = 0; i < w_count; i++) {
    		w[i] = w_data[i];
    	}
    	for (int i = 0; i < kernel_outdim; i++) {
    		b[i] = b_data[i];
    	}
    }
    // Cross-correlate in_layer with the loaded kernels into `layer`, with an
    // optional fused in-place ReLU. All tensors are channel-planar (dim-major).
    // BUGFIX: the zero-padding window clamp was applied in "valid" mode too,
    // truncating the kernel at output row/col == layer_{row,col} - 1 even
    // though the full window fits there; it now runs only for padding:same.
    // NOTE(review): the clamp handles a 1-pixel border only, i.e. it assumes
    // 3x3 kernels — confirm before using larger kernels.
    void CONV_LAYER::forward_pass(float* in_layer, bool relu_inplace) {
    	float* w2 = w, * b2 = b, * layer2 = layer;
    	float corr_val;
    	int kernel_row_half = kernel_row / 2,
    		kernel_col_half = kernel_col / 2,
    		cor_row_start, cor_row_end, cor_col_start, cor_col_end,
    		row_start, row_end, col_start, col_end;
    
    	for (int dim = 0; dim < layer_dim; dim++) {
    		/* per-output-channel kernel block, output plane and bias */
    		w2 = (w + dim * kernel_row * kernel_col * kernel_indim);
    		layer2 = (layer + dim * layer_row * layer_col);
    		b2 = (b + dim);
    
    		if (same_flag) {
    			row_start = 0, row_end = layer_row, col_start = 0, col_end = layer_col;//padding:same
    		}
    		else {
    			/* padding:valid — only visit positions where the kernel fully fits */
    			row_start = kernel_row_half, row_end = in_layer_row - kernel_row_half;
    			col_start = kernel_col_half, col_end = in_layer_col - kernel_col_half;
    		}
    
    		/* conv kernel operation */
    		for (int row = row_start; row < row_end; row++) {
    			for (int col = col_start; col < col_end; col++) {
    				cor_row_start = -kernel_row_half, cor_row_end = kernel_row_half;
    				cor_col_start = -kernel_col_half, cor_col_end = kernel_col_half;
    
    				/* padding:same only — clip the window at the image border
    				   (zero padding means clipped taps contribute nothing) */
    				if (same_flag) {
    					if (row == 0) cor_row_start = 0;
    					else if (row == layer_row - 1) cor_row_end = 0;
    
    					if (col == 0) cor_col_start = 0;
    					else if (col == layer_col - 1) cor_col_end = 0;
    				}
    
    				corr_val = 0;
    				for (int indim = 0; indim < kernel_indim; indim++) {
    					/* 2d-cross correlation operation */
    					for (int cor_row = cor_row_start; cor_row <= cor_row_end; cor_row++) {
    						for (int cor_col = cor_col_start; cor_col <= cor_col_end; cor_col++) {
    							corr_val += in_layer[indim * in_layer_row * in_layer_col + (row + cor_row) * in_layer_col + (col + cor_col)] *
    								w2[indim * kernel_row * kernel_col + (kernel_row_half + cor_row) * kernel_col + (kernel_col_half + cor_col)];
    						}
    					}
    				}
    				corr_val += *b2;
    
    				/* ReLU inplace operation! */
    				if (relu_inplace && (corr_val < 0)) corr_val = 0;
    
    				if (same_flag) layer2[row * layer_col + col] = corr_val;
    				else layer2[(row - row_start) * layer_col + col - col_start] = corr_val;
    			}
    		}
    	}
    }
    
    
    /* ReLU_LAYER */
    // Allocate a zeroed output buffer. alldim = layer_row * layer_col * layer_dim.
    void ReLU_LAYER::init_layer(int alldim) {
    	layer_alldim = alldim;
    	layer = new float[alldim]();
    }
    // Elementwise ReLU: layer[i] = max(in_layer[i], 0).
    // BUGFIX: negative inputs now explicitly write 0 instead of relying on the
    // buffer's initial zero fill, so a second forward pass no longer keeps
    // stale positive values from the previous call.
    void ReLU_LAYER::forward_pass(float* in_layer) {
    	for (int i = 0; i < layer_alldim; i++) {
    		layer[i] = (in_layer[i] > 0) ? in_layer[i] : 0;
    	}
    }
    
    
    /* FC_LAYER */
    // Record input/output neuron counts and allocate the zeroed output buffer.
    void FC_LAYER::init_layer(int in_alldim, int alldim) {
    	in_layer_alldim = in_alldim;
    	layer_alldim = alldim;
    	layer = new float[alldim]();
    }
    // Allocate zero-initialized weight matrix (one row per output neuron) and bias.
    void FC_LAYER::init_wb() {
    	w = new float[in_layer_alldim * layer_alldim]();
    	b = new float[layer_alldim]();
    }
    // Copy externally trained FC weights and biases into this layer.
    void FC_LAYER::load_wb(float* w_data, float* b_data) {
    	const int w_count = in_layer_alldim * layer_alldim;
    	for (int i = 0; i < w_count; i++) {
    		w[i] = w_data[i];
    	}
    	for (int i = 0; i < layer_alldim; i++) {
    		b[i] = b_data[i];
    	}
    }
    void FC_LAYER::forward_pass_after_pool(float* in_layer, bool relu_inplace) {
    	/* FC operation */
    	float* w2, *layer2, val;
    	w2 = w;
    	for (int i = 0; i < layer_alldim; i++) {
    		w2 = w + i * in_layer_alldim;
    		val = 0;
    		for (int dim = 0; dim < 16; dim++) {
    			for (int col = 0; col < 3; col++) {
    				for (int row = 0; row < 3; row++) {
    					val += in_layer[dim * 9 + row * 3 + col] * *w2++;
    				}
    			}
    		}
    		val += *(b + i);
    		if (relu_inplace && (val < 0)) val = 0;
    		*(layer + i) = val;
    	}
    }
    void FC_LAYER::forward_pass(float* in_layer, bool relu_inplace) {
    	float* w2, * layer2, val;
    	w2 = w;
    	for (int i = 0; i < layer_alldim; i++) {
    		w2 = w + i * in_layer_alldim;
    		val = 0;
    		for (int j = 0; j < in_layer_alldim; j++) {
    			val += *(w2 + j) * *(in_layer + j);
    		}
    		val += *(b + i);
    		if (relu_inplace && (val < 0)) val = 0;
    		*(layer + i) = val;
    	}
    }
    
    
    /* MXPOOLING_LAYER */
    // Configure the pooled output size (floor division — trailing rows/cols
    // that don't fill a full window are dropped) and allocate the buffer.
    // *in_row/*in_col are updated in place so the caller can chain layer setup.
    void MXP_LAYER::init_layer(int * in_row, int * in_col, int dim, int pool_size) {
    	pool_sz = pool_size;
    	in_layer_row = *in_row;
    	in_layer_col = *in_col;
    	in_layer_dim = dim;
    	layer_dim = dim;
    	layer_row = in_layer_row / pool_size;   // same as (n - n % p) / p for n >= 0
    	layer_col = in_layer_col / pool_size;
    	*in_row = layer_row;
    	*in_col = layer_col;
    	layer = new float[layer_row * layer_col * layer_dim]();
    }
    // Non-overlapping pool_sz x pool_sz max pooling, per channel.
    // BUGFIX: the running max now starts from the window's first element
    // instead of 0, so all-negative windows pool to their true maximum (the
    // old code clamped them to 0 — harmless after ReLU, wrong otherwise).
    void MXP_LAYER::forward_pass(float* in_layer) {
    	float max, val;
    	for (int dim = 0; dim < layer_dim; dim++) {
    
    		for (int row = 0; row < layer_row; row++) {
    			for (int col = 0; col < layer_col; col++) {
    
    				/* find maximum value of prev layer's pooling region */
    				max = in_layer[dim * in_layer_row * in_layer_col + (pool_sz * row) * in_layer_col + pool_sz * col];
    				for (int s_row = 0; s_row < pool_sz; s_row++) {
    					for (int s_col = 0; s_col < pool_sz; s_col++) {
    						val = in_layer[dim * in_layer_row * in_layer_col + (pool_sz * row + s_row) * in_layer_col + pool_sz * col + s_col];
    						if (val > max) max = val;
    					}
    				}
    				layer[dim * layer_row * layer_col + row * layer_col + col] = max;
    			}
    		}
    	}
    }
    
    
    /* SOFTMAX_LAYER */
    // Record the class count and allocate the zeroed probability buffer.
    void SOFTMAX_LAYER::init_layer(int alldim) {
    	layer_alldim = alldim;
    	layer = new float[alldim]();
    }
    // Numerically stable softmax: subtract the max logit before exponentiating.
    // Mathematically identical to exp(x_i) / sum(exp(x_j)), but avoids float
    // overflow (exp of a large logit previously produced inf -> NaN outputs).
    void SOFTMAX_LAYER::forward_pass(float* in_layer) {
    	if (layer_alldim <= 0) return;   // nothing to normalize
    	float max_val = in_layer[0];
    	for (int i = 1; i < layer_alldim; i++) {
    		if (in_layer[i] > max_val) max_val = in_layer[i];
    	}
    	float sum = 0;
    	for (int i = 0; i < layer_alldim; i++) {
    		sum += exp(in_layer[i] - max_val);
    	}
    	for (int i = 0; i < layer_alldim; i++) {
    		layer[i] = exp(in_layer[i] - max_val) / sum;
    	}
    }

     

    전체 레이어 선언부

    /* Define CNN layers */
    // NOTE(review): each object deliberately shadows its class name
    // (e.g. `INPUT_LAYER INPUT_LAYER;`) — legal C++, but any later reference
    // to the *type* must use an elaborated specifier (`class INPUT_LAYER`).
    INPUT_LAYER INPUT_LAYER;
    CONV_LAYER CONV_LAYER1, CONV_LAYER2, CONV_LAYER3;
    MXP_LAYER MXP_LAYER1, MXP_LAYER2, MXP_LAYER3;
    FC_LAYER FC_LAYER1, FC_LAYER2, FC_LAYER3;
    SOFTMAX_LAYER SOFTMAX_LAYER;

     

    Input/Conv/FC 레이어 초기화

    // Network setup. The spatial size is threaded through the conv/pool init
    // calls via &inb_row_size/&inb_col_size: each layer reads its input size
    // and writes back its output size for the next layer.
    int inb_row_size = iinb_row_size, inb_col_size = iinb_col_size;
    INPUT_LAYER.init_layer(inb_row_size, inb_col_size, 3);   // 3-channel RGB input
    /* Conv1 (valid), fused ReLU, Mxp1 */
    CONV_LAYER1.init_layer(&inb_row_size, &inb_col_size, net_width, false);
    MXP_LAYER1.init_layer(&inb_row_size, &inb_col_size, net_width, 2);
    /* Conv2 (same), fused ReLU, Mxp2 */
    CONV_LAYER2.init_layer(&inb_row_size, &inb_col_size, net_width, true);
    MXP_LAYER2.init_layer(&inb_row_size, &inb_col_size, net_width, 2);
    /* Conv3 (valid), fused ReLU, Mxp3 */
    CONV_LAYER3.init_layer(&inb_row_size, &inb_col_size, net_width, false);
    MXP_LAYER3.init_layer(&inb_row_size, &inb_col_size, net_width, 2);
    /* FC1, ReLU */
    // NOTE(review): uses inb_row_size twice — assumes a square input image;
    // for non-square inputs this should be inb_row_size * inb_col_size.
    FC_LAYER1.init_layer(inb_row_size* inb_row_size* net_width, FC1_size);
    FC_LAYER1.init_wb();
    /* FC2, ReLU */
    FC_LAYER2.init_layer(FC1_size, FC2_size);
    FC_LAYER2.init_wb();
    /* FC3, SoftMax */
    FC_LAYER3.init_layer(FC2_size, FC3_size);
    FC_LAYER3.init_wb();
    SOFTMAX_LAYER.init_layer(FC3_size);

     

    Conv layer와 FC layer의 파라미터 초기화 및 불러오기

    /* Load weight/bias (trained offline in MATLAB, arrays provided elsewhere) */
    CONV_LAYER1.init_kernel(3, 3, 3, net_width);          // 3x3 kernels over RGB
    CONV_LAYER1.load_kernel(conv1_w, conv1_b);
    CONV_LAYER2.init_kernel(3, 3, net_width, net_width);  // 3x3, net_width -> net_width
    CONV_LAYER2.load_kernel(conv2_w, conv2_b);
    CONV_LAYER3.init_kernel(3, 3, net_width, net_width);
    CONV_LAYER3.load_kernel(conv3_w, conv3_b);
    FC_LAYER1.load_wb(fc1_w, fc1_b);
    FC_LAYER2.load_wb(fc2_w, fc2_b);
    FC_LAYER3.load_wb(fc3_w, fc3_b);

     

    Forward pass

    /* Forward pass: input -> (conv + fused ReLU, maxpool) x3 -> FC x3 -> softmax */
    INPUT_LAYER.Mat_to_layer_Min_Max_Norm(cand_cropped);   // min-max input to [0,1]
    CONV_LAYER1.forward_pass(INPUT_LAYER.layer, true);     // true = ReLU in place
    MXP_LAYER1.forward_pass(CONV_LAYER1.layer);
    CONV_LAYER2.forward_pass(MXP_LAYER1.layer, true);
    MXP_LAYER2.forward_pass(CONV_LAYER2.layer);
    CONV_LAYER3.forward_pass(MXP_LAYER2.layer, true);
    MXP_LAYER3.forward_pass(CONV_LAYER3.layer);
    
    FC_LAYER1.forward_pass_after_pool(MXP_LAYER3.layer, true);   // MATLAB-style flatten
    FC_LAYER2.forward_pass(FC_LAYER1.layer, true);
    FC_LAYER3.forward_pass(FC_LAYER2.layer, false);        // raw logits, no ReLU
    SOFTMAX_LAYER.forward_pass(FC_LAYER3.layer);           // class probabilities

    일단 끄읏

    댓글

Designed by Tistory.