System information: OpenCV 3.4.1; Operating System / Platform: Ubuntu 14.04, Android Studio
Detailed description: I used the TensorFlow Object Detection API (https://github.com/tensorflow/models/tree/master/research/object_detection) to train a detector, but when I test the model on my PC and on my cell phone I see something strange. On the PC the model detects the object accurately, but on the cell phone the result is quite different, even though the code and the model are exactly the same.
PC Code:
```cpp
#include <cstdio>
#include <vector>

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

using namespace cv;
using namespace cv::dnn;
using namespace std;

void TestDetector_Image() {
    // load net
    Net net = dnn::readNetFromTensorflow("Test.pb", "Test.pbtxt");

    // build the input blob and set it as the network input
    Mat srcImage = imread("1.jpg");
    Mat inputBlob = blobFromImage(srcImage, 1 / 127.5f,
                                  Size(512, 512),
                                  Scalar(127.5, 127.5, 127.5),
                                  true, false);
    net.setInput(inputBlob);

    // forward
    Mat detection = net.forward();

    // get results: each row is [batchId, classId, confidence, left, top, right, bottom],
    // with the four coordinates normalized to [0, 1]
    Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
    vector<Rect> detectedRects;
    for (int i = 0; i < detectionMat.rows; i++)
    {
        float confidence = detectionMat.at<float>(i, 2);
        if (confidence > 0.5)
        {
            int objectClass = (int)(detectionMat.at<float>(i, 1));
            int left = static_cast<int>(detectionMat.at<float>(i, 3) * srcImage.cols);
            int top = static_cast<int>(detectionMat.at<float>(i, 4) * srcImage.rows);
            int right = static_cast<int>(detectionMat.at<float>(i, 5) * srcImage.cols);
            int bottom = static_cast<int>(detectionMat.at<float>(i, 6) * srcImage.rows);
            Rect box(Point(left, top), Point(right, bottom));
            detectedRects.push_back(box);
        }
    }

    // print results
    for (int i = 0; i < (int)detectedRects.size(); ++i)
    {
        cv::rectangle(srcImage, detectedRects[i], Scalar(0, 255, 255), 2);
        printf("%d: %d %d %d %d\n", i, detectedRects[i].x, detectedRects[i].y,
               detectedRects[i].width, detectedRects[i].height);
    }
    imwrite("Result.jpg", srcImage);
}
```
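To check whether both platforms even feed the network the same data, a small helper like the one below could print a checksum of the input blob right before net.setInput(), on the PC side and inside the Android detect() alike. This is only a sketch; the helper name printBlobChecksum is made up and not part of the code in this report.

```cpp
#include <cstdio>
#include <opencv2/core.hpp>

// Print a simple checksum of the blob produced by blobFromImage.
// If the printed numbers differ between the PC build and the Android build,
// the two platforms are not giving the network identical input.
static void printBlobChecksum(const cv::Mat& blob)
{
    double sum = 0.0;
    const float* p = blob.ptr<float>();       // blobFromImage output is continuous
    for (size_t i = 0; i < blob.total(); ++i)
        sum += p[i];
    printf("blob: elements=%zu sum=%f\n", blob.total(), sum);
}
```

For example, calling printBlobChecksum(inputBlob); right after blobFromImage() in TestDetector_Image(), and the same call on blob inside the Android detect(), would show whether the preprocessing already differs before the forward pass.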
Android Code:
```java
import android.content.res.AssetManager;
import android.graphics.Bitmap;
import android.graphics.Rect;
import android.util.Log;

public class Detector implements AutoCloseable {
    private static final String TAG = "Detector";
    private static boolean libraryFound = false;

    // Used to load the 'native-lib' library on application startup.
    static {
        try {
            System.loadLibrary("native-lib");
            libraryFound = true;
        } catch (UnsatisfiedLinkError error) {
            error.printStackTrace();
            Log.e(TAG, "libnative-lib.so not found");
        }
    }

    private final Object lockObj = new Object();

    public Detector() {
        if (!libraryFound) {
            return;
        }
        allocate();
        if (nativeHandler == 0) throw new RuntimeException("Detector allocate error.");
    }

    @Override
    public void close() throws Exception {
        synchronized (lockObj) {
            if (nativeHandler == 0) return;
            deallocate();
            nativeHandler = 0;
        }
    }

    public int init(AssetManager assetManager) {
        return nativeInit(assetManager);
    }

    public Rect detect(Bitmap bitmap) {
        if (bitmap == null) return null;
        if (bitmap.getConfig() != Bitmap.Config.ARGB_8888) return null;
        return nativeDetect(bitmap);
    }

    ////////////////////////////// Native Code //////////////////////////
    private long nativeHandler;

    private native void allocate();
    private native void deallocate();
    private native int nativeInit(AssetManager assetManager);
    private native Rect nativeDetect(Bitmap bitmap);
}
```
The JNI code called by detect() in the Java code:

```cpp
JNIEXPORT jobject JNICALL DETECTOR_METHOD(nativeDetect)(JNIEnv *env, jobject instance, jobject bitmap) {
    AndroidBitmapInfo info;
    void* pixels = 0;
    int ret;

    if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
        LOG_E("AndroidBitmap_getInfo() failed.");
        return NULL;
    }
    if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
        LOG_W("Bitmap format is not RGBA_8888");
        return NULL;
    }
    if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
        LOG_W("AndroidBitmap_lockPixels() failed.");
        return NULL;
    }

    cv::Mat img(info.height, info.width, CV_8UC4, pixels);
    AndroidBitmap_unlockPixels(env, bitmap);
    cv::cvtColor(img, img, cv::COLOR_RGBA2BGR);

    std::vector<Box> boxs;
    get_detector(env, instance)->detect(img, boxs);
    if (boxs.size() < 1)
        return NULL;

    jclass rectClazz = env->FindClass("android/graphics/Rect");
    jmethodID rectConstructorMethod = env->GetMethodID(rectClazz, "<init>", "(IIII)V");
    int left = boxs[0].bunding_box.x;
    int top = boxs[0].bunding_box.y;
    int right = boxs[0].bunding_box.x + boxs[0].bunding_box.width;
    int bottom = boxs[0].bunding_box.x + boxs[0].bunding_box.height;
    jobject rect = env->NewObject(rectClazz, rectConstructorMethod, left, top, right, bottom);
    env->DeleteLocalRef(rectClazz);
    return rect;
}
```
The C++ code called by the JNI code:

```cpp
void detect(const cv::Mat& image, std::vector<Box>& boxs) {
    cv::Mat blob = cv::dnn::blobFromImage(image, in_scalar_factor_,
                                          cv::Size(in_width_, in_height_),
                                          cv::Scalar(mean_val_, mean_val_, mean_val_),
                                          true, false);
    net_.setInput(blob);
    cv::Mat detection = net_.forward();
    cv::Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
    for (int i = 0; i < detectionMat.rows; ++i) {
        float confidence = detectionMat.at<float>(i, 2);
        if (confidence >= threshold_) {
            int objClass = (int) detectionMat.at<float>(i, 1);
            int left = static_cast<int>(detectionMat.at<float>(i, 3) * image.cols);
            int top = static_cast<int>(detectionMat.at<float>(i, 4) * image.rows);
            int right = static_cast<int>(detectionMat.at<float>(i, 5) * image.cols);
            int bottom = static_cast<int>(detectionMat.at<float>(i, 6) * image.rows);
            cv::Rect rect(cv::Point(left, top), cv::Point(right, bottom));
            Box box;
            box.bunding_box = rect;
            box.confidence = confidence;
            boxs.push_back(box);
        }
    }
}
```

I use the same image on the PC and on Android, but the results are quite different. The face rect (format: x, y, width, height):

PC:      119, 10, 205, 207
Android: 118, 11, 207, 313
I do not know why!
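For comparison, a standalone sketch like the following could dump the raw, normalized detection rows before any conversion to pixel coordinates, so the PC output and the Android output can be compared field by field. It only reuses the model files and preprocessing from the PC code above.

```cpp
#include <cstdio>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

// Dump the raw detection rows [classId, confidence, left, top, right, bottom]
// exactly as the network returns them (coordinates normalized to [0, 1]).
// A sketch reusing the same model files and preprocessing as the PC code above.
int main()
{
    cv::dnn::Net net = cv::dnn::readNetFromTensorflow("Test.pb", "Test.pbtxt");
    cv::Mat srcImage = cv::imread("1.jpg");
    cv::Mat inputBlob = cv::dnn::blobFromImage(srcImage, 1 / 127.5f,
                                               cv::Size(512, 512),
                                               cv::Scalar(127.5, 127.5, 127.5),
                                               true, false);
    net.setInput(inputBlob);
    cv::Mat detection = net.forward();

    cv::Mat rows(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
    for (int i = 0; i < rows.rows; ++i) {
        if (rows.at<float>(i, 2) < 0.5f)
            continue;
        printf("class=%d conf=%.4f box=[%.4f %.4f %.4f %.4f]\n",
               (int)rows.at<float>(i, 1), rows.at<float>(i, 2),
               rows.at<float>(i, 3), rows.at<float>(i, 4),
               rows.at<float>(i, 5), rows.at<float>(i, 6));
    }
    return 0;
}
```

If these normalized values already differ between the two platforms, the difference comes from the network input or the forward pass; if they match, it comes from the code that turns them into the final rectangle.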