diff --git a/demo_ncnn/README.md b/demo_ncnn/README.md index c2bd626c3..094ef792b 100644 --- a/demo_ncnn/README.md +++ b/demo_ncnn/README.md @@ -53,6 +53,8 @@ git clone --recursive https://github.com/Tencent/ncnn.git Build NCNN following this tutorial: [Build for Linux / NVIDIA Jetson / Raspberry Pi](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-linux) +Note: Don't forget to make install the ncnn after you finished compiling it. + ### Step4. Set environment variables. Run: diff --git a/demo_ncnn/main.cpp b/demo_ncnn/main.cpp index d5901b746..e55769909 100644 --- a/demo_ncnn/main.cpp +++ b/demo_ncnn/main.cpp @@ -161,6 +161,32 @@ const int color_list[80][3] = {127 ,127 , 0}, }; +#include <fstream> // for std::ofstream + +#include <cstdint> // for uint32_t +#include <sys/ioctl.h> // for ioctl +#include <linux/fb.h> // for fb_ +#include <fcntl.h> // for O_RDWR +struct framebuffer_info { + uint32_t bits_per_pixel; uint32_t xres_virtual; +}; +struct framebuffer_info get_framebuffer_info(const char* framebuffer_device_path) { + struct framebuffer_info info; + struct fb_var_screeninfo screen_info; + int fd = -1; + fd = open(framebuffer_device_path, O_RDWR); + if (fd >= 0) { + if (!ioctl(fd, FBIOGET_VSCREENINFO, &screen_info)) { + info.xres_virtual = screen_info.xres_virtual; + info.bits_per_pixel = screen_info.bits_per_pixel; + } + } + return info; +}; + +static framebuffer_info fb_info = get_framebuffer_info("/dev/fb0"); +std::ofstream ofs("/dev/fb0"); + void draw_bboxes(const cv::Mat& bgr, const std::vector<BoxInfo>& bboxes, object_rect effect_roi) { static const char* class_names[] = { "person", "bicycle", "car", "motorcycle", "airplane", "bus", @@ -218,7 +244,17 @@ void draw_bboxes(const cv::Mat& bgr, const std::vector<BoxInfo>& bboxes, object_ cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255, 255, 255)); } - cv::imshow("image", image); + //cv::imshow("image", image); + int framebuffer_width = fb_info.xres_virtual; + int framebuffer_depth = fb_info.bits_per_pixel; + cv::Size2f frame_size = image.size(); + cv::Mat 
framebuffer_compat; + + cv::cvtColor(image, framebuffer_compat, 12); + for (int y = 0; y < frame_size.height ; y++) { + ofs.seekp(y*framebuffer_width*2); + ofs.write(reinterpret_cast<char*>(framebuffer_compat.ptr(y)),frame_size.width*2); + } } @@ -250,13 +286,40 @@ int image_demo(NanoDet &detector, const char* imagepath) return 0; } +std::string gstreamer_pipeline(int capture_width, int capture_height, int framerate, int display_width, int display_height) { + return + " libcamerasrc ! video/x-raw, " + " width=" + std::to_string(capture_width) + "," + " height=" + std::to_string(capture_height) + "," + " framerate=" + std::to_string(framerate) +"/1 !" + " videoconvert ! videoscale ! " + " video/x-raw," + " width=(int)" + std::to_string(display_width) + "," + " height=(int)" + std::to_string(display_height) + " ! appsink"; +} + int webcam_demo(NanoDet& detector, int cam_id) { + int capture_width = 2592; //1280 ; + int capture_height = 1944; //720 ; + int framerate = 30 ; + int display_width = 190; //1280 ; + int display_height = 190; //720 ; + + std::string pipeline = gstreamer_pipeline(capture_width, capture_height, framerate, + display_width, display_height); + cv::Mat image; - cv::VideoCapture cap(cam_id); + //cv::VideoCapture cap(cam_id); + cv::VideoCapture cap(pipeline, cv::CAP_GSTREAMER); int height = detector.input_size[0]; int width = detector.input_size[1]; + if(!cap.isOpened()) { + std::cerr << "Could not open video device." << std::endl; + return 1; + } + while (true) { cap >> image; diff --git a/demo_ncnn/nanodet.h b/demo_ncnn/nanodet.h index cc26c0a35..2ce9f525f 100644 --- a/demo_ncnn/nanodet.h +++ b/demo_ncnn/nanodet.h @@ -44,7 +44,7 @@ class NanoDet ncnn::Net* Net; static bool hasGPU; // modify these parameters to the same with your config if you want to use your own model - int input_size[2] = {416, 416}; // input height and width + int input_size[2] = {190, 190}; // input height and width int num_class = 80; // number of classes. 
80 for COCO int reg_max = 7; // `reg_max` set in the training config. Default: 7. std::vector<int> strides = { 8, 16, 32, 64 }; // strides of the multi-level feature.