光流法 OpenCV 示例 下载本文

内容发布更新时间：2024/6/26 17:31:31 星期一。下面是文章的全部内容，请认真阅读。

/* --Sparse Optical Flow Demo Program--
 *
 * Written by David Stavens (david.stavens@ai.stanford.edu) */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#include <cv.h>
#include <highgui.h>

/* Mathematical constant pi, used when converting flow vectors to angles for drawing. */
static const double pi = 3.14159265358979323846;

/* Squares an int, returning the result as a double.
 * The multiplication is done in double: computing `a * a` in int would be
 * signed-integer overflow (undefined behavior) for |a| > ~46341. */
inline static double square(int a) { return (double)a * a; }

/* This is just an inline that allocates images. I did this to reduce clutter in the
 * actual computer vision algorithmic code. Basically it allocates the requested image
 * unless that image is already non-NULL. It always leaves a non-NULL image as-is even
 * if that image's size, depth, and/or channels are different than the request. */

/* Allocates *img with cvCreateImage(size, depth, channels) unless it is already
 * non-NULL, in which case it is left untouched (even if its size/depth/channels
 * differ from the request).  On allocation failure, prints to stderr and exits. */
inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
	if ( *img != NULL )
		return;

	*img = cvCreateImage( size, depth, channels );
	if ( *img == NULL )
	{
		/* NOTE(review): string reconstructed — the scraped source had the quotes
		 * mangled into backslashes. */
		fprintf(stderr, "Error: Couldn't allocate image.  Out of memory?\n");
		exit(-1);
	}
}

int main(void) { /* Create an object that decodes the input video stream. */ CvCapture *input_video = cvCaptureFromFile( \Stavens\\\\Desktop\\\\223B-Demo\\\\optical_flow_input.avi\

); if (input_video == NULL) { /* Either the video didn't exist OR it uses a codec OpenCV * doesn't support. */ fprintf(stderr, \ return -1; } /* Read the video's frame size out of the AVI. */ CvSizeframe_size; frame_size.height = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT ); frame_size.width = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH ); /* Determine the number of frames in the AVI. */ longnumber_of_frames; /* Go to the end of the AVI (ie: the fraction is \ cvSetCaptureProperty(input_video, CV_CAP_PROP_POS_AVI_RATIO, 1. ); /* Now that we're at the end, read the AVI position in frames */ number_of_frames = (int) cvGetCaptureProperty(input_video, CV_CAP_PROP_POS_FRAMES ); /* Return to the beginning */ cvSetCaptureProperty(input_video, CV_CAP_PROP_POS_FRAMES, 0. ); /* Create a windows called \output. * Have the window automatically change its size to match the output. */ cvNamedWindow(\ longcurrent_frame = 0; while(true) { staticIplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL, *pyramid1 = NULL, *pyramid2 = NULL;

/* Go to the frame we want. Important if multiple frames are queried in * the loop which they of course are for optical flow. Note that the very * first call to this is actually not needed. (Because the correct position * is set outsite the for() loop.) */ cvSetCaptureProperty(input_video, CV_CAP_PROP_POS_FRAMES, current_frame ); /* Get the next frame of the video. * IMPORTANT! cvQueryFrame() always returns a pointer to the _same_ * memory location. So successive calls: * frame1 = cvQueryFrame(); * frame2 = cvQueryFrame(); * frame3 = cvQueryFrame(); * will result in (frame1 == frame2 && frame2 == frame3) being true. * The solution is to make a copy of the cvQueryFrame() output. */ frame = cvQueryFrame( input_video ); if (frame == NULL) { /* Why did we get a NULL frame? We shouldn't be at the end. */ fprintf(stderr, \Hmm. The end came sooner than we thought.\\n\ return -1; } /* Allocate another image if not already allocated. * Image has ONE channel of color (ie: monochrome) with 8-bit \ * This is the image format OpenCV algorithms actually operate on (mostly). */ allocateOnDemand(&frame1_1C, frame_size, IPL_DEPTH_8U, 1 ); /* Convert whatever the AVI image format is into OpenCV's preferred format. * AND flip the image vertically. Flip is a shameless hack. OpenCV reads

* in AVIs upside-down by default. (No comment :-)) */ cvConvertImage(frame, frame1_1C, CV_CVTIMG_FLIP); /* We'll make a full color backup of this frame so that we can draw on it. * (It's not the best idea to draw on the static memory space of cvQueryFrame().) */ allocateOnDemand(&frame1, frame_size, IPL_DEPTH_8U, 3 ); cvConvertImage(frame, frame1, CV_CVTIMG_FLIP); /* Get the second frame of video. Same principles as the first. */ frame = cvQueryFrame( input_video ); if (frame == NULL) { fprintf(stderr, \Hmm. The end came sooner than we thought.\\n\ return -1; } allocateOnDemand(&frame2_1C, frame_size, IPL_DEPTH_8U, 1 ); cvConvertImage(frame, frame2_1C, CV_CVTIMG_FLIP); /* Shi and Tomasi Feature Tracking! */ /* Preparation: Allocate the necessary storage. */ allocateOnDemand(&eig_image, frame_size, IPL_DEPTH_32F, 1 ); allocateOnDemand(&temp_image, frame_size, IPL_DEPTH_32F, 1 ); /* Preparation: This array will contain the features found in frame 1. */ CvPoint2D32f frame1_features[400]; /* Preparation: BEFORE the function call this variable is the array size * (or the maximum number of features to find). AFTER the function call * this variable is the number of features actually found. */

intnumber_of_features; /* I'm hardcoding this at 400. But you should make this a #define so that you can * change the number of features you use for an accuracy/speed tradeoff analysis. */ number_of_features = 400; /* Actually run the Shi and Tomasi algorithm!! * \ * \the algorithm. * The first \features (based on the eigenvalues). * The second \distance between features. * \point to a part of the image. * WHEN THE ALGORITHM RETURNS: * \ * \indicating the number of feature points found. */ cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &number_of_features, .01, .01, NULL); /* Pyramidal Lucas Kanade Optical Flow! */ /* This array will contain the locations of the points from frame 1 in frame 2. */ CvPoint2D32f frame2_features[400]; /* The i-th element of this array will be non-zero if and only if the i-th feature of * frame 1 was found in frame 2. */ charoptical_flow_found_feature[400]; /* The i-th element of this array is the error in the optical flow for the i-th feature * of frame1 as found in frame 2. If the i-th feature was not found (see the array above) * I think the i-th entry in this array is undefined.