This repository has been archived by the owner on Feb 8, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 15
/
Copy pathcapture.cpp
101 lines (86 loc) · 2.77 KB
/
capture.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
#include "ht-api.h"
#include "ht-internal.h"
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
// Grab one frame from the context's camera into ctx.color (BGR) and
// ctx.grayscale (single channel). Returns false if the camera is not
// open or no frame could be read; true on success.
bool ht_get_image(headtracker_t& ctx) {
    Mat large, large2;

    if (!ctx.camera.isOpened())
        return false;

    // Fix: the original spun forever on read() failure. VideoCapture::read()
    // returns false permanently at end-of-stream (file playback) or when the
    // device disappears, so an unbounded retry loop hangs the caller.
    // Retry a bounded number of times, then report failure.
    bool got_frame = false;
    for (int attempt = 0; attempt < 100; attempt++) {
        if (ctx.camera.read(large)) {
            got_frame = true;
            break;
        }
    }
    if (!got_frame)
        return false;

    ctx.color = large;

    // Convert to grayscale only when the source is 3-channel BGR;
    // otherwise assume it is already single-channel.
    if (large.channels() == 3)
        cvtColor(large, large2, COLOR_BGR2GRAY);
    else
        large2 = large;

#if 0
    // Disabled: optional half-resolution downscale of the tracking image.
    Size newSize(large2.cols *0.5, large2.rows *0.5);
    resize(large2, ctx.grayscale, newSize, 0, 0, CV_INTER_AREA);
#else
    ctx.grayscale = large2;
#endif
    //equalizeHist(ctx.grayscale, ctx.grayscale);
    return true;
}
// Allocate and initialize a tracking context. The capture source is the
// given video file when `filename` is non-null, otherwise the camera index
// from `config`. Returns a heap-allocated context; release it with
// ht_free_context().
HT_API(headtracker_t*) ht_make_context(const ht_config_t* config, const char* filename)
{
    headtracker_t* ctx = new headtracker_t;
    ctx->config = *config;

    // Capture source: file playback vs. live camera.
    if (filename)
        ctx->camera = VideoCapture(filename);
    else
        ctx->camera = VideoCapture(ctx->config.camera_index);

    // Detection / model data loaded from files shipped next to the binary.
    ctx->head_classifier = CascadeClassifier("haarcascade_frontalface_alt2.xml");
    ctx->model = ht_load_model("head.raw");
    ctx->bbox = ht_load_model("bounding-box.raw");
    ctx->flandmark_model = flandmark_init("flandmark_model.dat");

    // Timing bookkeeping.
    ctx->ticks_last_classification = ht_tickcount();
    ctx->ticks_last_features = ctx->ticks_last_classification;
    ctx->ticks_last_second = ht_tickcount() / 1000;
    ctx->ticks_last_flandmark = -1;
    ctx->hz = 0;
    ctx->hz_last_second = -1;

    // Keypoint storage; idx == -1 marks an unused slot.
    ctx->keypoint_uv = new Point3f[ctx->config.max_keypoints];
    ctx->keypoints = new ht_keypoint[ctx->config.max_keypoints];
    for (int k = ctx->config.max_keypoints - 1; k >= 0; --k)
        ctx->keypoints[k].idx = -1;

    // Tracker state.
    ctx->state = HT_STATE_LOST;
    ctx->restarted = true;
    ctx->zoom_ratio = 1.0;
    ctx->focal_length_w = -1;
    ctx->focal_length_h = -1;
    ctx->fast_state = 20;

    // Capture overrides only make sense for a live camera, not file playback.
    if (!filename) {
        if (ctx->config.force_width)
            ctx->camera.set(CV_CAP_PROP_FRAME_WIDTH, ctx->config.force_width);
        if (ctx->config.force_height)
            ctx->camera.set(CV_CAP_PROP_FRAME_HEIGHT, ctx->config.force_height);
        if (ctx->config.force_fps)
            ctx->camera.set(CV_CAP_PROP_FPS, ctx->config.force_fps);
    }

    // Optical-flow pyramids (double-buffered).
    ctx->pyr_a = new vector<Mat>();
    ctx->pyr_b = new vector<Mat>();

    return ctx;
}
// Release all resources owned by a context created with ht_make_context(),
// then free the context itself. `ctx` must not be used afterwards.
HT_API(void) ht_free_context(headtracker_t* ctx) {
    ctx->camera.release();
    // delete[] on a null pointer is a no-op, so no guards are needed.
    delete[] ctx->keypoint_uv;
    delete[] ctx->model.triangles;
    delete[] ctx->model.projection;
    // Fix: ctx->bbox is loaded with ht_load_model() in ht_make_context()
    // exactly like ctx->model, but its arrays were never released — plug
    // the memory leak.
    delete[] ctx->bbox.triangles;
    delete[] ctx->bbox.projection;
    delete[] ctx->keypoints;
    delete ctx->pyr_a;
    delete ctx->pyr_b;
    flandmark_free(ctx->flandmark_model);
    delete ctx;
}
// Return the most recent BGR color frame captured by ht_get_image().
// Note: cv::Mat is reference-counted, so this shares pixel data with
// ctx->color rather than deep-copying it.
HT_API(const Mat) ht_get_bgr_frame(headtracker_t* ctx) {
return ctx->color;
}
// Expose the context's capture device. The returned pointer is non-owning
// and remains valid only until ht_free_context(ctx) is called.
HT_API(cv::VideoCapture*) ht_capture(headtracker_t* ctx)
{
return &ctx->camera;
}