
Good to see that MLP has a ReLU activation function, but why does it return NaN?

asked 2019-02-21 13:54:16 -0600

The following program uses ANN_MLP to learn the XOR function. It works fine with the sigmoid and Gaussian activation functions, but predicts NaN for ReLU and leaky ReLU. Any ideas? Using OpenCV 4.0.1.

#include <opencv2/core.hpp>
#include <opencv2/ml/ml.hpp>

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

void PrintWts(Ptr<ANN_MLP>& mlp) {
    Mat wts;

    Mat sizes = mlp->getLayerSizes();

    for (int layerIndex = 0; layerIndex < sizes.size().area(); ++layerIndex) {
        cout << format("layer %d: size(%d)\n", layerIndex, sizes.at<int>(layerIndex));
        cout << "weights:\n";
        wts = mlp->getWeights(layerIndex);
        cout << wts << "\n\n";
    }
}

int main() {
    vector<int> layerSizes = { 2, 4, 1 };
    vector<float> inputTrainingData = {
        0.0, 0.0,
        0.0, 1.0,
        1.0, 0.0,
        1.0, 1.0
    };
    Mat inputTrainingMat(Size(2, 4), CV_32FC1, inputTrainingData.data());
    vector<float> outputTrainingData = {
        0.0,
        1.0,
        1.0,
        0.0,
    };
    Mat outputTrainingMat(Size(1, 4), CV_32FC1, outputTrainingData.data());

    Ptr<TrainData> trainingData = TrainData::create(
        inputTrainingMat,
        ROW_SAMPLE,
        outputTrainingMat
    );
    TermCriteria termCrit = TermCriteria(
        TermCriteria::Type::COUNT + TermCriteria::Type::EPS,
        220,
        0.00000001
    );

    Ptr<ANN_MLP> mlp = ANN_MLP::create();
    mlp->setLayerSizes(layerSizes);
    mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1);

    mlp->train(trainingData);

    PrintWts(mlp);

    if (mlp->isTrained()) {
        for (int i = 0; i < inputTrainingMat.rows; i++) {
            Mat sample = inputTrainingMat.row(i);
            Mat result;
            mlp->predict(sample, result);
            cout << sample << " -> " << result << endl;
        }
    }
    system("pause");
}
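
(The failing variants are not shown above; presumably they just swap the activation line, e.g. something like the following, with the rest of the setup unchanged. The param1 value for leaky ReLU is a guess.)

    mlp->setActivationFunction(ANN_MLP::RELU);               // predict() then returns nan
    // or, for leaky ReLU:
    mlp->setActivationFunction(ANN_MLP::LEAKYRELU, 0.1);     // also reported as nan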

1 answer


answered 2019-02-21 14:23:40 -0600 by LBerger, updated 2019-02-22 04:35:36 -0600

You can't do it like that; you should use two outputs. Try:

    vector<int> layerSizes = { 2, 4, 2 };
    vector<float> inputTrainingData = {
        0.0, 0.0,
        0.0, 1.0,
        1.0, 0.0,
        1.0, 1.0
    };
    Mat inputTrainingMat(Size(2, 4), CV_32FC1, inputTrainingData.data());
    vector<float> outputTrainingData = {
        1.0, 0.0,
        0.0, 1.0,
        0.0, 1.0,
        1.0, 0.0
    };
    Mat outputTrainingMat(Size(2, 4), CV_32FC1, outputTrainingData.data());

    Ptr<TrainData> trainingData = TrainData::create(
        inputTrainingMat,
        ROW_SAMPLE,
        outputTrainingMat
    );
    TermCriteria termCrit = TermCriteria(
        TermCriteria::Type::COUNT + TermCriteria::Type::EPS,
        220,
        0.00000001
    );

    Ptr<ANN_MLP> mlp = ANN_MLP::create();
    mlp->setLayerSizes(layerSizes);
    mlp->setActivationFunction(ml::ANN_MLP::LEAKYRELU, 0.5);
    mlp->setTrainMethod(ml::ANN_MLP::RPROP);
    mlp->setTermCriteria(TermCriteria(TermCriteria::COUNT, 10000, 0.00001));
    mlp->train(trainingData, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE);

I use two columns to encode the output because OpenCV's ANN_MLP is meant for classification, not regression, so you should use a one-hot encoding.
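
For completeness, a minimal sketch of turning a one-hot prediction back into a class index. The helper name decodeOneHot is mine, not part of OpenCV:

    // Hypothetical helper: for this 2-output network, predict() fills a 1x2
    // row of responses; the column with the largest response is the class
    // (column 0 = "XOR is 0", column 1 = "XOR is 1").
    int decodeOneHot(const cv::Mat& result) {
        cv::Point maxLoc;
        cv::minMaxLoc(result, nullptr, nullptr, nullptr, &maxLoc);
        return maxLoc.x;
    }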

Now, if you want to use your initial code: there is a problem with the initial conditions of the network, and I need time to investigate. I use LEAKYRELU first (10 iterations) and then RELU (with ml::ANN_MLP::UPDATE_WEIGHTS):

int main() {
    vector<int> layerSizes = { 2, 2, 1 };
    vector<float> inputTrainingData;
    vector<float> outputTrainingData;
    RNG rng;
    for (int i = 0; i < 10; i++)
    {
        int in1 = rng.uniform(0, 2);
        int in2 = rng.uniform(0, 2);
        inputTrainingData.push_back(in1);
        inputTrainingData.push_back(in2);
        if (in1 == in2)
            outputTrainingData.push_back(0);
        else
            outputTrainingData.push_back(1);
    }

    Mat inputTrainingMat(Size(2, (int)inputTrainingData.size() / 2), CV_32FC1, inputTrainingData.data());
    Mat outputTrainingMat(Size(1, (int)outputTrainingData.size()), CV_32FC1, outputTrainingData.data());

    Ptr<TrainData> trainingData = TrainData::create(
        inputTrainingMat,
        ROW_SAMPLE,
        outputTrainingMat
    );

    Ptr<ANN_MLP> mlp = ANN_MLP::create();
    mlp->setLayerSizes(layerSizes);
    mlp->setActivationFunction(ml::ANN_MLP::LEAKYRELU);
    mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1);
    mlp->setTermCriteria(TermCriteria(TermCriteria::COUNT, 10, 0.1));
    mlp->train(trainingData, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE);
    cout << "LEAKYRELU\n";
    if (mlp->isTrained()) {
        for (int i = 0; i < 10; i++) {
            Mat sample = inputTrainingMat.row(i);
            Mat result;
            mlp->predict(sample, result);
            cout << sample << " -> " << result << endl;
        }
    }

    mlp->setActivationFunction(ml::ANN_MLP::RELU);
    mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1);
    mlp->setTermCriteria(TermCriteria(TermCriteria::COUNT, 1000, 0.001));
    mlp->train(trainingData, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE+ ml::ANN_MLP::UPDATE_WEIGHTS);

    cout << "RELU\n";

    if (mlp->isTrained()) {
        for (int i = 0; i < 10; i++) {
            Mat sample = inputTrainingMat.row(i);
            Mat result;
            mlp->predict(sample, result);
            cout << sample << " -> " << result << endl;
        }
    }
    system("pause");
}

Sometimes a good network is found with ReLU only:

    mlp->setActivationFunction(ml::ANN_MLP::RELU);
    mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1);
    mlp->setTermCriteria(TermCriteria(TermCriteria::COUNT, 1000, 0.001));
    for (int i = 0; i < 10; i++)
    {
        mlp->train(trainingData, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE);

        cout << "RELU\n";

        if (mlp->isTrained()) {
            for (int i = 0; i < 10; i++) {
                Mat sample = inputTrainingMat.row(i);
                Mat result;
                mlp->predict(sample, result);
                cout << sample << " -> " << result << endl;
            }
        }

    }
    system("pause");

Comments

Yes, this works, but I would like to understand why. My understanding is that ReLU is just max(0, d), where d is the dot product of inputs and weights plus a bias. Why does this need 2 outputs? Also, why use RPROP vs. BACKPROP for training ReLU?

Chris (2019-02-21 14:41:07 -0600)
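
As the comment above says, the activations themselves are simple. An illustrative sketch (not OpenCV's internal code) of the two functions applied to a neuron's pre-activation d = w·x + b, where param1 is the negative-side slope OpenCV uses for LEAKYRELU:

    // Illustrative only: per-neuron activations on d = w·x + b.
    float relu(float d)               { return d > 0 ? d : 0.0f; }
    float leakyRelu(float d, float a) { return d > 0 ? d : a * d; }  // a ~ param1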

Also, why use rprop vs backprop for training relu?

Do what you want; it does not matter.

LBerger (2019-02-22 04:21:10 -0600)

Thanks to LBerger, I was able to get this classic XOR problem to work with MLP and ReLU (leaky ReLU). The network has 2 inputs, 4 nodes in the hidden layer, and one output. The key seems to be to use leaky ReLU with a small param1 value, about 0.2 or so. Also, specify NO_OUTPUT_SCALE for the training.

mlp->setActivationFunction(ANN_MLP::LEAKYRELU, 0.1);

...

mlp->train(trainingData, ml::ANN_MLP::NO_OUTPUT_SCALE); // +ml::ANN_MLP::NO_INPUT_SCALE);

Chris (2019-02-22 09:31:32 -0600)
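
Putting the fragments from this comment together, a minimal sketch of the configuration it describes (topology, train method, and term criteria reused from the original question, so treat the exact values as an assumption):

    Ptr<ANN_MLP> mlp = ANN_MLP::create();
    mlp->setLayerSizes(vector<int>{ 2, 4, 1 });              // 2 inputs, 4 hidden, 1 output
    mlp->setActivationFunction(ANN_MLP::LEAKYRELU, 0.1);     // small param1, ~0.1-0.2
    mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1);
    mlp->setTermCriteria(termCrit);                          // as defined in the question
    mlp->train(trainingData, ANN_MLP::NO_OUTPUT_SCALE);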
