Hi there,
I'm using OpenCV 2.4.12 with Visual Studio 2012. I am trying to classify facial images by age into three groups. I have extracted the features using PCA, in image form. How do I feed these features into the kNN algorithm?
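To make the question clearer, this is roughly what I think the PCA + kNN step should look like. It is only a sketch and I have not tested it; classifyAgeWithPcaKnn, trainImages, trainLabels, testImage and k are just placeholder names for my own data, and I am assuming the eigenvectors and mean returned by getMat() can be reused with subspaceProject() to get the features:

// Sketch only (not tested): train Eigenfaces for the PCA, then hand the
// projected images to CvKNearest as one CV_32F row per sample.
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/ml/ml.hpp"
#include <vector>
using namespace cv;
using namespace std;

int classifyAgeWithPcaKnn(const vector<Mat>& trainImages,
                          const vector<int>& trainLabels,
                          const Mat& testImage, int k)
{
    // Full PCA, same as in the Eigenfaces code below.
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->train(trainImages, trainLabels);
    Mat W    = model->getMat("eigenvectors"); // one eigenface per column
    Mat mean = model->getMat("mean");

    // One PCA projection per row, converted to CV_32F for the kNN.
    Mat trainData((int)trainImages.size(), W.cols, CV_32F);
    Mat responses((int)trainLabels.size(), 1, CV_32F);
    for (size_t i = 0; i < trainImages.size(); i++) {
        Mat proj;
        subspaceProject(W, mean, trainImages[i].reshape(1, 1)).convertTo(proj, CV_32F);
        proj.copyTo(trainData.row((int)i));
        responses.at<float>((int)i, 0) = (float)trainLabels[i];
    }

    // Train kNN on the projections and classify the projected test image.
    CvKNearest knn;
    knn.train(trainData, responses);
    Mat testProj;
    subspaceProject(W, mean, testImage.reshape(1, 1)).convertTo(testProj, CV_32F);
    return (int)knn.find_nearest(testProj, k); // predicted age class (0, 1 or 2)
}

Is it correct to give CvKNearest one CV_32F row per projected image like this, or do the features have to be prepared differently?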
I do not understand these lines. What are these files for?
FILE *fp2 = fopen("C:\\Users\\Dorothea\\Desktop\\train-labels-idx1-ubyte.gz","rb");
FILE *fp = fopen("C:\\Users\\Dorothea\\Desktop\\train-images.idx1-ubyte","rb");
Below is my PCA (Eigenfaces) code, followed by my kNN code.
..........
I need help to classify the images with kNN using the extracted features.
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
static Mat norm_0_255(InputArray _src) {
Mat src = _src.getMat();
// Create and return normalized image:
Mat dst;
switch(src.channels()) {
case 1:
cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
break;
case 3:
cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
break;
default:
src.copyTo(dst);
break;
}
return dst;
}
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if(!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
int main(int argc, const char *argv[]) {
// Check for valid command line arguments, print usage
// if no arguments were given.
if (argc < 2) {
cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
exit(1);
}
string output_folder = ".";
if (argc == 3) {
output_folder = string(argv[2]);
}
// Get the path to your CSV.
string fn_csv = string(argv[1]);
// These vectors hold the images and corresponding labels.
vector<Mat> images;
vector<int> labels;
// Read in the data. This can fail if no valid
// input filename is given.
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);
}
// Quit if there are not enough images for this demo.
if(images.size() <= 1) {
string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
CV_Error(CV_StsError, error_message);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size:
int height = images[0].rows;
// The following lines simply get the last images from
// your dataset and remove it from the vector. This is
// done, so that the training data (which we learn the
// cv::FaceRecognizer on) and the test data we test
// the model with, do not overlap.
Mat testSample = images[images.size() - 1];
int testLabel = labels[labels.size() - 1];
images.pop_back();
labels.pop_back();
// The following lines create an Eigenfaces model for
// face recognition and train it with the images and
// labels read from the given CSV file.
// This here is a full PCA, if you just want to keep
// 10 principal components (read Eigenfaces), then call
// the factory method like this:
//
// cv::createEigenFaceRecognizer(10);
//
// If you want to create a FaceRecognizer with a
// confidence threshold (e.g. 123.0), call it with:
//
// cv::createEigenFaceRecognizer(10, 123.0);
//
// If you want to use _all_ Eigenfaces and have a threshold,
// then call the method like this:
//
// cv::createEigenFaceRecognizer(0, 123.0);
//
Ptr<FaceRecognizer> model0 = createEigenFaceRecognizer();
model0->train(images, labels);
// save the model to eigenfaces_at.yaml
model0->save("eigenfaces_at.yml");
//
//
// Now create a new Eigenfaces Recognizer
//
Ptr<FaceRecognizer> model1 = createEigenFaceRecognizer();
model1->load("eigenfaces_at.yml");
// The following line predicts the label of a given
// test image:
int predictedLabel = model1->predict(testSample);
//
// To get the confidence of a prediction call the model with:
//
// int predictedLabel = -1;
// double confidence = 0.0;
// model->predict(testSample, predictedLabel, confidence);
//
string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
cout << result_message << endl;
// Here is how to get the eigenvalues of this Eigenfaces model:
Mat eigenvalues = model1->getMat("eigenvalues");
// And we can do the same to display the Eigenvectors (read Eigenfaces):
Mat W = model1->getMat("eigenvectors");
// Get the sample mean from the training data
Mat mean = model1->getMat("mean");
// Display or save:
if(argc == 2) {
imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
} else {
imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
}
// Display or save the Eigenfaces:
for (int i = 0; i < min(10, W.cols); i++) {
string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
cout << msg << endl;
// get eigenvector #i
Mat ev = W.col(i).clone();
// Reshape to original size & normalize to [0...255] for imshow.
Mat grayscale = norm_0_255(ev.reshape(1, height));
// Show the image & apply a Jet colormap for better sensing.
Mat cgrayscale;
applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
// Display or save:
if(argc == 2) {
imshow(format("eigenface_%d", i), cgrayscale);
} else {
imwrite(format("%s/eigenface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
}
}
// Display or save the image reconstruction at some predefined steps:
for(int num_components = 10; num_components < 300; num_components+=15) {
// slice the eigenvectors from the model
Mat evs = Mat(W, Range::all(), Range(0, num_components));
Mat projection = subspaceProject(evs, mean, images[0].reshape(1,1));
Mat reconstruction = subspaceReconstruct(evs, mean, projection);
// Normalize the result:
reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
// Display or save:
if(argc == 2) {
imshow(format("eigenface_reconstruction_%d", num_components), reconstruction);
} else {
imwrite(format("%s/eigenface_reconstruction_%d.png", output_folder.c_str(), num_components), reconstruction);
}
}
// Display if we are not writing to an output folder:
if(argc == 2) {
waitKey(0);
}
return 0;
}

And this is my kNN code (it is a separate file):

#include "opencv2/core/core.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/ml/ml.hpp"
#include <cstdio>
using namespace cv;
typedef unsigned char BYTE; // BYTE (unsigned char), as defined in <windows.h>
// read a big-endian 32-bit integer from an idx file header
int readFlippedInteger(FILE *fp)
{
int ret = 0;
BYTE *temp = (BYTE*)(&ret);
fread(&temp[3], sizeof(BYTE), 1, fp);
fread(&temp[2], sizeof(BYTE), 1, fp);
fread(&temp[1], sizeof(BYTE), 1, fp);
fread(&temp[0], sizeof(BYTE), 1, fp);
return ret;
}
int main()
{
FILE *fp2 = fopen("C:\\Users\\Dorothea\\Desktop\\train-labels-idx1-ubyte.gz","rb");
FILE *fp = fopen("C:\\Users\\Dorothea\\Desktop\\train-images.idx1-ubyte","rb");
if(!fp || !fp2) return 1;
int magicNumber = readFlippedInteger(fp);
int numImages = readFlippedInteger(fp);
int numRows = readFlippedInteger(fp);
int numCols = readFlippedInteger(fp);
fseek(fp2, 0x08, SEEK_SET);
int size = numRows*numCols;
CvMat *trainingVectors = cvCreateMat(numImages, size, CV_32FC1);
CvMat *trainingLabels = cvCreateMat(numImages, 1, CV_32FC1);
BYTE *temp = new BYTE[size];
BYTE tempClass=0;
for(int i=0; i<numImages; i++)
{
fread((void*)temp, size, 1, fp);
fread((void*)(&tempClass),sizeof(BYTE),1,fp2);
trainingLabels->data.fl[i]= tempClass;
for(int k=0; k<size; k++)
trainingVectors->data.fl[i*size+k]= temp[k];
}
KNearest knn(trainingVectors, trainingLabels);
printf("Maximum k: %d", knn.get_max_k());
fclose(fp);
fclose(fp2);
cvReleaseMat(&trainingVectors);
cvReleaseMat(&trainingLabels);
//////////////////// Testing //////////////////////////
fp= fopen("C:\\Users\\Dorothea\\Desktop\\t10k-images.idx3-ubyte", "rb");
fp2 = fopen("C:\\Users\\Dorothea\\Desktop\\t10k-labels.idx1-ubyte", "rb");
magicNumber = readFlippedInteger(fp);
numImages = readFlippedInteger(fp);
numRows = readFlippedInteger(fp);
numCols = readFlippedInteger(fp);
fseek(fp2, 0x08, SEEK_SET);
CvMat *testVectors = cvCreateMat(numImages, size, CV_32FC1);
CvMat *testLabels = cvCreateMat(numImages, 1, CV_32FC1);
CvMat *actualLabels = cvCreateMat(numImages, 1, CV_32FC1);
temp= new BYTE[size];
tempClass=1;
CvMat *currentTest= cvCreateMat(1, size, CV_32FC1);
CvMat *currentLabel = cvCreateMat(1, 1, CV_32FC1);
int totalCorrect=0;
for(int i=0; i<numImages; i++)
{
fread((void*)temp, size, 1, fp);
fread((void*)(&tempClass),sizeof(BYTE),1,fp2);
actualLabels->data.fl[i]= (float)tempClass;
for(int k=0; k<size; k++)
{
testVectors->data.fl[i*size+k]= temp[k];
currentTest->data.fl[k]= temp[k];
}
knn.find_nearest(currentTest, 5, currentLabel);
testLabels->data.fl[i] = currentLabel->data.fl[0];
if(currentLabel->data.fl[0]==actualLabels->data.fl[i])
totalCorrect++;
}
printf("Time: %d Accuracy: %f ", (int)time, (double)totalCorrect*100/(double)numImages);
return 0;
}
Thank you in advance