Problem

I'm currently learning back-propagation neural networks (BPNN) through Prof. Andrew Ng's online course on Coursera. I think I've mostly understood the method, and I'm trying to implement it in C++ with Armadillo (a linear algebra library).

I'm using exactly what's on Andrew's slides, but overall the code doesn't work well. Can anyone spot what's wrong with it?

Andrew's slides are here: Slide_8 and Slide_9. Calculating the activations a is covered in lecture 8, and the cost function J(theta) and its gradient dJ(theta) are covered in lecture 9.
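For reference, the regularized cost function from lecture 9 that I'm trying to reproduce is, as I understand it,

$$ J(\Theta) = -\frac{1}{m} \sum_{i=1}^{m} \sum_{k=1}^{K} \left[ y_k^{(i)} \log\big(h_\Theta(x^{(i)})\big)_k + \big(1 - y_k^{(i)}\big) \log\Big(1 - \big(h_\Theta(x^{(i)})\big)_k\Big) \right] + \frac{\lambda}{2m} \sum_{l=1}^{L-1} \sum_{i=1}^{s_l} \sum_{j=1}^{s_{l+1}} \big(\Theta_{ji}^{(l)}\big)^2 $$

And here's my code.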

#include <iostream>
#include <vector>
#include <cmath>
#include <armadillo>

using namespace std;
using namespace arma;

#define MAX_ITER 500

double lrate = 0.1;
double lambda = 0.0;

int numLayers;
int numHiddenLayerNode;
int numOutputNodes;
int numHiddenLayers = 1;

colvec vec2colvec(vector<double>& vec){
    int length = vec.size();
    colvec A(length);
    for(int i=0; i<length; i++){
        A(i) = vec[i];
    }
    return A;
}

rowvec vec2rowvec(vector<double>& vec){
    colvec A = vec2colvec(vec);
    return A.t();
}

mat vec2mat(vector<vector<double> >&vec){
    int cols = vec.size();
    int rows = vec[0].size();
    mat A(rows, cols);
    for(int i = 0; i<rows; i++){
        for(int j=0; j<cols; j++){
            A(i, j) = vec[j][i];
        }
    }
    return A;
}

colvec log(colvec vec){
    for(int i = 0; i < vec.size(); i++){
        vec(i) = log(vec(i));
    }
    return vec;
}

rowvec log(rowvec vec){
    for(int i = 0; i < vec.size(); i++){
        vec(i) = log(vec(i));
    }
    return vec;
}

double sigmoid(double z){
    return 1 / (exp(-z) + 1);
}

rowvec sigmoid(rowvec z){
    for(int i=0; i<z.size(); i++){
        z(i) = sigmoid(z(i));
    }
    return z;
}

colvec sigmoid(colvec z){
    rowvec temp = z.t();
    return (sigmoid(temp)).t();
}

double dsigmoid(double z){
    return z * (1 - z);
}

colvec dsigmoid(colvec a){
    colvec one = ones<colvec>(a.size());
    return a % (one - a);
}

rowvec dsigmoid(rowvec a){
    colvec temp = a.t();
    return (dsigmoid(temp)).t();
}

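//forward propagation: a[0] is the input column x.col(m); for each later layer,
//a[i] = sigmoid(weightsMatrix[i-1] * [1; a[i-1]]), where the leading 1 is the bias unit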
vector<colvec> getA(mat x, vector<mat>& weightsMatrix, int m){

    vector<colvec> a;
    colvec temp1(x.n_rows);
    a.push_back(temp1);
    for(int i=0; i<numHiddenLayers; i++){
        colvec temp(numHiddenLayerNode);
        a.push_back(temp);
    }
    colvec temp2(numOutputNodes);
    a.push_back(temp2);
    colvec one = ones<colvec>(1);
    for(int i = 0; i < a.size(); i++){
        if(i == 0) a[i] = x.col(m);
        else{
            colvec xtemp = a[i - 1];
            xtemp =  join_cols(one, xtemp);
            a[i] = weightsMatrix[i - 1] * xtemp;
            a[i] = sigmoid(a[i]);
        }
    }    
    return a;    
}

//h(x_i) is just the last vector of a (the output-layer activations)
colvec gethm(vector<colvec> a){
    return a[a.size() - 1];
}

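//cost function J(theta): cross-entropy over all training samples plus the L2 regularization term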
colvec getCostFunction(mat x, vector<mat>& weightsMatrix, mat y, double lambda){

    int nsamples = x.n_cols;
    colvec sum = zeros<colvec>(y.n_rows);
    for(int m = 0; m < nsamples; m++){
        vector<colvec> a = getA(x, weightsMatrix, m);
        colvec hx = gethm(a);
        colvec one = ones<colvec>(hx.size());
        for(int k = 0; k < y.n_rows; k++){
            if(y(k, m) == 0.0){
                sum = sum + log(one - hx);
            }else{
                sum = sum + log(hx);
            }
        }
    }
    sum = sum / (double)(- nsamples);
    double temp = lambda / 2 / nsamples;
    double sum2 = 0.0;
    for(int i = 0; i < weightsMatrix.size() - 1; i++){
        for(int j = 0; j < weightsMatrix[i].n_cols; j++){
            for(int k = 0; k < weightsMatrix[i].n_rows; k++){
                sum2 += weightsMatrix[i](k, j) * weightsMatrix[i](k, j);
            }
        }
    }
    return sum + temp * sum2;
}

vector<mat> getdJ(mat x, mat y, vector<mat>& weightsMatrix, double lambda){
    //bigDelta holds the temporary accumulators used to compute dJ
    //initialize every entry of bigDelta to zero
    vector<mat> bigDelta;
    for(int i=0; i<weightsMatrix.size(); i++){
        mat temp = zeros<mat>(weightsMatrix[i].n_rows, weightsMatrix[i].n_cols);
        bigDelta.push_back(temp);
    }   
    vector<mat> dJ;
    for(int i=0; i<weightsMatrix.size(); i++){
        mat temp = zeros<mat>(weightsMatrix[i].n_rows, weightsMatrix[i].n_cols);
        dJ.push_back(temp);
    }
    int nsamples = x.n_cols;
    //use backProp method
    for(int m = 0; m < nsamples; m++){

        vector<colvec> a = getA(x, weightsMatrix, m);
        vector<colvec> tempDelta;
        for(int i=0; i<a.size(); i++){
            colvec temp = zeros<colvec>(a[i].size());
            tempDelta.push_back(temp);
        }
        //tempDelta[0] is never used (there is no delta for the input layer)
        for(int l = tempDelta.size() - 1; l > 0; l --){
            if(l == tempDelta.size() - 1){
                tempDelta[l] = a[l] - y.col(m);
            }else{
                mat mult = (weightsMatrix[l]).t() * tempDelta[l + 1];
                tempDelta[l] = mult.rows(1, mult.n_rows - 1) % dsigmoid(a[l]);
            }
        }
        for(int l = 0; l < bigDelta.size(); l++){
            colvec tp = ones<colvec>(1);
            tp =  join_cols(tp, a[l]);
            bigDelta[l] += tempDelta[l + 1] * tp.t();    
        }
    }
    for(int l = 0; l < bigDelta.size(); l++){
        dJ[l] = bigDelta[l] / (double)nsamples;
        dJ[l] = dJ[l] + lambda * weightsMatrix[l];
        for(int j = 0; j < dJ[l].n_rows; j++){
            dJ[l](j, 0) = dJ[l](j, 0) - lambda * weightsMatrix[l](j, 0);
        }            
    }
    return dJ;
}

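//forward pass used for prediction on the test set (sigmoid is applied to the result afterwards in bpnn)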
colvec calculateY(colvec x, vector<mat> weightsMatrix){
    colvec result(x);
    colvec tp = ones<colvec>(1);
    for(int i=0; i<weightsMatrix.size(); i++){
        result = join_cols(tp, result);
        result = weightsMatrix[i] * result;
    }
    return result;
}

void bpnn(vector<vector<double> >&vecX, vector<vector<double> >&vecY, vector<vector<double> >& testX, vector<vector<double> >& testY){

    int nsamples = vecX.size();
    int nfeatures = vecX[0].size();
    //convert vecX and vecY into Armadillo matrices
    mat y = vec2mat(vecY);
    mat x = vec2mat(vecX);

    numLayers = numHiddenLayers + 1;
    numHiddenLayerNode = nfeatures * 5;
    numOutputNodes = vecY[0].size();
    //build weights matrices and randomly initialize them.
    vector<mat> weightsMatrix;
    mat tempmat;
    double init_epsilon = 0.12;
    //input --> first hidden layer:
    tempmat = randu<mat>(numHiddenLayerNode, nfeatures + 1);
    weightsMatrix.push_back(tempmat);
    //hidden layer --> hidden layer :
    for(int i=0; i< numHiddenLayers - 1; i++){
        tempmat = randu<mat>(numHiddenLayerNode, numHiddenLayerNode + 1);
        weightsMatrix.push_back(tempmat);
    }
    //last hidden layer --> output layer:
    tempmat = randu<mat>(numOutputNodes, numHiddenLayerNode + 1);
    weightsMatrix.push_back(tempmat);
    for(int i=0; i<weightsMatrix.size(); i++){
        weightsMatrix[i] = weightsMatrix[i] * (2 * init_epsilon) - init_epsilon;
    }
    //at this point the weight matrices are built and randomly initialized

 /*
    //Gradient checking (this check gives the right answers)
    vector<mat> dJ = getdJ(x, y, weightsMatrix, lambda);
    cout<<"test!!!!"<<endl;
    double step = 1e-4;
    for(int i=0; i<weightsMatrix.size(); i++){
        cout<<"################ Weight Layer "<<i<<endl;
        for(int j=0; j<weightsMatrix[i].n_rows; j++){
            for(int k=0; k<weightsMatrix[i].n_cols; k++){
                double memo = weightsMatrix[i](j, k);
                weightsMatrix[i](j, k) = memo + step;
                colvec value1 = getCostFunction(x, weightsMatrix, y, 0);
                weightsMatrix[i](j, k) = memo - step;
                colvec value2 = getCostFunction(x, weightsMatrix, y, 0);
                colvec tp = (value1 - value2) / (2 * step);
                cout<<tp(0)<<", "<<dJ[i](j, k)<<endl;
                weightsMatrix[i](j, k) = memo;
            }
        }
    }
    */

    int converge = 0;
    while(converge < MAX_ITER){
        vector<mat> dJ = getdJ(x, y, weightsMatrix, lambda);
        for(int j = 0; j < weightsMatrix.size(); j++){
            weightsMatrix[j] -= lrate * dJ[j];
        }
        colvec cost = getCostFunction(x, weightsMatrix, y, lambda);
        double costdouble = cost(0);
        cout<<"learning step: "<<converge<<", Cost function value = "<<costdouble<<endl;
        ++ converge;
    }
    cout<<"############result#############"<<endl;
    for(int i=0; i<testX.size(); i++){
        colvec tpcol = vec2colvec(testX[i]);
        colvec result = calculateY(tpcol, weightsMatrix);
        result = sigmoid(result);
        cout<<result<<endl;
    }

}

I tried the gradient checking method and it gives the right answers, so I think the J(theta) part is correct.
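For each weight, the commented-out check above compares the analytic gradient entry against the two-sided numerical estimate

$$ \frac{\partial J}{\partial \Theta^{(l)}_{ij}} \approx \frac{J(\Theta^{(l)}_{ij} + \epsilon) - J(\Theta^{(l)}_{ij} - \epsilon)}{2\epsilon}, \qquad \epsilon = 10^{-4}, $$

and the two values it prints match for every entry.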

Since I'm using the sigmoid 1/(exp(-z)+1), the outputs should be split between values >= 0.5 and values < 0.5, but right now they are all greater than 0.5, which really confuses me.
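To illustrate what I mean: with this sigmoid, the output is >= 0.5 exactly when the pre-activation z is >= 0, so getting only values above 0.5 means every final pre-activation comes out positive. A quick check I could add at the end of bpnn (just a sketch reusing vec2colvec, calculateY, and the trained weightsMatrix from above) would print the raw pre-sigmoid outputs:

//print the raw (pre-sigmoid) outputs for each test sample;
//if they are all positive, every sigmoid output will be >= 0.5
for(int i = 0; i < testX.size(); i++){
    colvec tpcol = vec2colvec(testX[i]);
    colvec raw = calculateY(tpcol, weightsMatrix);
    cout<<"sample "<<i<<" raw output:"<<endl<<raw.t()<<endl;
}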

(To be clear, '%' in Armadillo means element-wise multiplication, i.e. '.*' in MATLAB/Octave; the other Armadillo functions I used are fairly self-explanatory and similar to their MATLAB/Octave counterparts.)
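For example, a minimal illustration of that operator (assuming a C++11 build of Armadillo that accepts initializer lists):

colvec u = {1.0, 2.0, 3.0};
colvec v = {4.0, 5.0, 6.0};
colvec w = u % v;    //element-wise product {4, 10, 18}, i.e. u .* v in Octave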

Thanks.

No correct solution
