Boost Python wrapper and OpenCV argument error with cv::Mat



I have a C++ class wrapped with Boost Python.

One of the class methods takes two cv::Mat arguments, as shown below:

MyClass::do_something(cv::Mat input, cv::Mat output)

The functionality I expose to Python includes the method above, a constructor, and some print methods.

Initialization and the print methods (used for debugging) work fine in both C++ and the Python wrapper:

obj = MyClass(arg1, arg2, arg3)
obj.print_things()

These calls complete successfully.

I run into a problem with the do_something() call (in the Python binding; it completes successfully in C++):

from libmyclass import *
import cv
rgb = cv.CreateMat(256,256,cv.CV_8UC3)
result = cv.CreateMat(256,256,cv.CV_8UC3)
#...fill "rgb"
obj.do_something(rgb,result)

The error I get when running the Python code above is:

Boost.Python.ArgumentError: Python argument types in
MyClass.do_something(MyClass, cv2.cv.cvmat, cv2.cv.cvmat)
did not match C++ signature:
do_something(MyClass {lvalue}, cv::Mat, cv::Mat)

Is this a difference between cv2.cv.Mat and cv::Mat? I have OpenCV 2.3.1 and 2.4, both with Boost Python bindings.

In case it's relevant, here is what my Boost wrapper looks like:

#include <boost/python.hpp>
#include "MyClass.h"
#include <cv.h>
using namespace boost::python;
BOOST_PYTHON_MODULE(libmyclass) { 
  class_<MyClass>("MyClass", init<std::string, std::string, std::string>())
    .def("print_things", &MyClass::print_things)
    .def("do_something", &MyClass::do_something)
  ;
}
Boost Python does not automatically convert a cv2.cv.Mat (Python) into a cv::Mat (C++).

You need to declare your C++ method to take a boost::python::object and add extra code on the C++ side to convert that object into a cv::Mat.
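Applied to the class in the question, that roughly means exposing a small forwarding method instead of do_something itself. The following is only a sketch under that assumption; do_something_py is a hypothetical member, and convertObj2Mat stands in for a conversion routine like the one in the STASM example below:

#include <boost/python.hpp>
#include <opencv2/core/core.hpp>
namespace bp = boost::python;
// Hypothetical forwarding method: accept generic Python objects, convert them
// to cv::Mat on the C++ side, then call the real implementation.
void MyClass::do_something_py(bp::object input, bp::object output)
{
  cv::Mat in  = convertObj2Mat(input);   // conversion helper, see below
  cv::Mat out = convertObj2Mat(output);
  do_something(in, out);
}
// In the module definition, expose the forwarding method under the old name:
//   .def("do_something", &MyClass::do_something_py)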

Here is an example from my wrapper around the STASM active shape model library (the header first, followed by the implementation):

#ifndef ASMSearcher_HPP
#define ASMSearcher_HPP
#include <string>
#include <boost/python.hpp>
#include <opencv2/core/core.hpp>
class ASMSearcher;
/*
 * Wrapper around STASM ASMSearcher class so that we don't mix boost python code into the STASM library.
 */
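/* These structs mirror the internal layout of the objects created by OpenCV's
 * own Python bindings (cv2.cv.cvmat / cv2.cv.iplimage), so that their data
 * pointers can be reached from C++. */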
struct memtrack_t {
  PyObject_HEAD
  void *ptr;
  Py_ssize_t size;
};
struct cvmat_t
{
  PyObject_HEAD
  CvMat *a;
  PyObject *data;
  size_t offset;
};
struct iplimage_t {
  PyObject_HEAD
  IplImage *a;
  PyObject *data;
  size_t offset;
};
namespace bp = boost::python;
class Stasm
{
  public:
    Stasm();
    Stasm(const std::string &conf_file0, const std::string &conf_file1);
    ~Stasm();
    bp::list detect(bp::object image, const std::string &conf_file0="",
        const std::string &conf_file1="");
  private:
    ASMSearcher *asmLandmarksSearcher;
    cv::Mat convertObj2Mat(bp::object image);
    cv::Mat convert_from_cviplimage(PyObject *o,const char *name);
    cv::Mat convert_from_cvmat(PyObject *o, const char* name);
};
#endif

#include "stasm.hpp"
#include "stasm_ocv.hpp"
#include <opencv2/highgui/highgui.hpp>
Stasm::Stasm() 
{
  asmLandmarksSearcher = NULL;
}
Stasm::~Stasm() 
{
  if (asmLandmarksSearcher != NULL)
    delete asmLandmarksSearcher;
}
Stasm::Stasm(const std::string &conf_file0, const std::string &conf_file1)
{
  asmLandmarksSearcher = new ASMSearcher(conf_file0, conf_file1); 
}
/*Detect asm facial landmarks in image*/
bp::list Stasm::detect(bp::object image, 
    const std::string &conf_file0, 
    const std::string &conf_file1)
{
  const char *file0 = conf_file0 == "" ? NULL : conf_file0.c_str();
  const char *file1 = conf_file1 == "" ? NULL : conf_file1.c_str();
  // Convert pyobject to IplImage/Mat etc.
  cv::Mat img = convertObj2Mat(image);
  bool isColor = img.channels() == 3 ? true : false;
  int nlandmarks;
  int landmarks[500]; // space for x,y coords of up to 250 landmarks
  asmLandmarksSearcher->search(&nlandmarks, landmarks,
      "image_name", (const char*)img.data, img.cols, img.rows,
      isColor /* is_color */, file0 /* conf_file0 */, file1 /* conf_file1 */);
      //isColor /* is_color */, NULL /* conf_file0 */, NULL /* conf_file1 */);
  // Convert landmarks to python list object
  bp::list pyLandmarks;
  for (int i = 0; i < 2*nlandmarks; i++)
    pyLandmarks.append(landmarks[i]);
  return pyLandmarks;
}
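/* Rebuild a cv::Mat header around the data buffer owned by a cv2.cv.cvmat
 * Python object (no pixel copy). */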
cv::Mat Stasm::convert_from_cvmat(PyObject *o, const char* name)
{
  cv::Mat dest;
  cvmat_t *m = (cvmat_t*)o;
  void *buffer;
  Py_ssize_t buffer_len;
  m->a->refcount = NULL;
  if (m->data && PyString_Check(m->data))
  {
    assert(cvGetErrStatus() == 0);
    char *ptr = PyString_AsString(m->data) + m->offset;
    cvSetData(m->a, ptr, m->a->step);
    assert(cvGetErrStatus() == 0);
    dest = m->a;
  }
  else if (m->data && PyObject_AsWriteBuffer(m->data, &buffer, &buffer_len) == 0)
  {
    cvSetData(m->a, (void*)((char*)buffer + m->offset), m->a->step);
    assert(cvGetErrStatus() == 0);
    dest = m->a;
  }
  else
  {
    printf("CvMat argument '%s' has no data", name);
    //failmsg("CvMat argument '%s' has no data", name);
  }
  return dest;
}
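/* Same idea for the old cv2.cv.iplimage wrapper type. */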
cv::Mat Stasm::convert_from_cviplimage(PyObject *o,const char *name)
{
  cv::Mat dest;
  iplimage_t *ipl = (iplimage_t*)o;
  void *buffer;
  Py_ssize_t buffer_len;
  if (PyString_Check(ipl->data)) {
    cvSetData(ipl->a, PyString_AsString(ipl->data) + ipl->offset, ipl->a->widthStep);
    assert(cvGetErrStatus() == 0);
    dest = ipl->a;
  } else if (ipl->data && PyObject_AsWriteBuffer(ipl->data, &buffer, &buffer_len) == 0) {
    cvSetData(ipl->a, (void*)((char*)buffer + ipl->offset), ipl->a->widthStep);
    assert(cvGetErrStatus() == 0);
    dest = ipl->a;
  } else {
    printf("IplImage argument '%s' has no data", name);
  }
  return dest;
}
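/* Dispatch on the Python object's type name to pick the right converter. */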
cv::Mat Stasm::convertObj2Mat(bp::object image)
{
  if(strcmp(image.ptr()->ob_type->tp_name,"cv2.cv.iplimage") == 0)
  {
    return convert_from_cviplimage(image.ptr(),image.ptr()->ob_type->tp_name);
  }
  else
    return convert_from_cvmat(image.ptr(), image.ptr()->ob_type->tp_name);
}
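
The module definition itself is not shown above; here is a minimal sketch of what it could look like, assuming the extension is built as pystasm to match the import in the test script below:

#include <boost/python.hpp>
#include "stasm.hpp"
BOOST_PYTHON_MODULE(pystasm)
{
  // Expose the wrapper class; detect() takes the image object plus two
  // optional configuration file names (bp is the alias from stasm.hpp).
  bp::class_<Stasm>("Stasm", bp::init<std::string, std::string>())
    .def("detect", &Stasm::detect,
         (bp::arg("image"), bp::arg("conf_file0")="", bp::arg("conf_file1")=""))
  ;
}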

Sample code to test it looks like this:

#!/usr/bin/env python
import cv2
import pystasm
import numpy as np
import sys
DEFAULT_TEST_IMAGE = "428.jpg"
def getFacePointsMapping():
  mapping = {}
  fhd = open('mapping2.txt')
  line = fhd.readline()
  a = line.split()
  for i, n in enumerate(a):
    mapping[int(n)] = i
  return mapping
def drawFaceKeypoints(img, landmarks):
  mapping = getFacePointsMapping()
  numpyLandmarks = np.asarray(landmarks)
  numLandmarks = len(landmarks) / 2
  numpyLandmarks = numpyLandmarks.reshape(numLandmarks, -1)
  for i in range(0, len(landmarks) - 1, 2):
    pt = (landmarks[i], landmarks[i+1])
    #cv2.polylines(img, [numpyLandmarks], False, (0, 255, 0))
    number = mapping[i/2]
    cv2.circle(img, pt, 3, (255, 0, 0), cv2.cv.CV_FILLED)
    cv2.putText(img, str(number), pt, cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255)) 
  return img
def getFacePointsMapping():
  mapping = []
  fhd = open('mapping2.txt')
  line = fhd.readline()
  a = line.split()
  for n in a:
    mapping.append(n)
  return mapping
def main():
  asmsearcher = pystasm.Stasm('mu-68-1d.conf', 'mu-76-2d.conf')
  if len(sys.argv) == 2:
    imagename = sys.argv[1]
  else:
    imagename = DEFAULT_TEST_IMAGE
# Detect facial keypoints in image
  img = cv2.imread(imagename)
  img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  landmarks = asmsearcher.detect(cv2.cv.fromarray(img))
  img = drawFaceKeypoints(img, landmarks)
  cv2.imshow("test", img)
  cv2.waitKey()
if __name__ == '__main__':
  main()

Sorry, I haven't had time to clean up the code. Note that you need to call cv2.cv.fromarray(numpy_array) for this to work. I'm still trying to figure out how to pass a numpy array directly to Boost Python; if you've figured it out, please let me know :).
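For what it's worth, one possible direction for taking numpy arrays directly is to read the array through the numpy C API and wrap its buffer in a cv::Mat header. This is an untested sketch only; mat_from_ndarray is a hypothetical helper, it assumes a contiguous 8-bit array, and the module's init function would need to call import_array():

#include <boost/python.hpp>
#include <numpy/arrayobject.h>
#include <opencv2/core/core.hpp>
#include <stdexcept>
namespace bp = boost::python;
// Hypothetical helper: wrap a 2-D (optionally multi-channel) 8-bit numpy array
// in a cv::Mat header without copying. The array must stay alive for as long
// as the returned Mat is used.
static cv::Mat mat_from_ndarray(bp::object obj)
{
  if (!PyArray_Check(obj.ptr()))
    throw std::runtime_error("expected a numpy.ndarray");
  PyArrayObject *a = reinterpret_cast<PyArrayObject*>(obj.ptr());
  int rows = static_cast<int>(PyArray_DIM(a, 0));
  int cols = static_cast<int>(PyArray_DIM(a, 1));
  int channels = PyArray_NDIM(a) == 3 ? static_cast<int>(PyArray_DIM(a, 2)) : 1;
  return cv::Mat(rows, cols, CV_8UC(channels), PyArray_DATA(a),
                 static_cast<size_t>(PyArray_STRIDE(a, 0)));
}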

By the way, I should add that the code for converting between Boost objects and OpenCV's IplImage and Mat was taken from OpenCV's source code.
