2D stabilization: flip orientation of the scale parameter
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2011 Blender Foundation.
19  * All rights reserved.
20  *
21  * Contributor(s): Blender Foundation,
22  *                 Sergey Sharybin
23  *                 Keir Mierle
24  *                 Ichthyostega
25  *
26  * ***** END GPL LICENSE BLOCK *****
27  */
28
29 /** \file blender/blenkernel/intern/tracking_stabilize.c
30  *  \ingroup bke
31  *
32  * This file contains implementation of 2D image stabilization.
33  */
34
35 #include <limits.h>
36
37 #include "DNA_movieclip_types.h"
38 #include "DNA_scene_types.h"
39 #include "DNA_anim_types.h"
40 #include "RNA_access.h"
41
42 #include "BLI_utildefines.h"
43 #include "BLI_sort_utils.h"
44 #include "BLI_math_vector.h"
45 #include "BLI_math.h"
46
47 #include "BKE_tracking.h"
48 #include "BKE_movieclip.h"
49 #include "BKE_fcurve.h"
50 #include "BLI_ghash.h"
51
52 #include "MEM_guardedalloc.h"
53 #include "IMB_imbuf_types.h"
54 #include "IMB_imbuf.h"
55
56
57 /* == Parameterization constants == */
58
59 /* When measuring the scale changes relative to the rotation pivot point, it
60  * might happen accidentally that a probe point (tracking point), which doesn't
61  * actually move on a circular path, gets very close to the pivot point, causing
62  * the measured scale contribution to go toward infinity. We damp this undesired
63  * effect by adding a bias (floor) to the measured distances, which will
64  * dominate very small distances and thus cause the corresponding track's
65  * contribution to diminish.
66  * Measurements happen in normalized (0...1) coordinates within a frame.
67  */
68 static float SCALE_ERROR_LIMIT_BIAS = 0.01f;
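/* Example with made-up numbers: with a bias of 0.01, a probe point measured
 * 0.001 away from the pivot is treated as if it were 0.011 away. A jitter of
 * +/- 0.001 in that measurement then changes the scale contribution by only
 * about 10 percent, instead of the almost unbounded swing the raw distance
 * would cause. The bias is applied in rotation_contribution() and
 * initialize_track_for_stabilization().
 */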
69
70 /* When to consider a track as completely faded out.
71  * This is used in conjunction with the "disabled" flag of the track
72  * to determine start positions, end positions and gaps
73  */
74 static float EPSILON_WEIGHT = 0.005f;
75
76
77
78 /* == private working data == */
79
80 /* Per track baseline for stabilization, defined at reference frame.
81  * A track's reference frame is chosen as close as possible to the (global)
82  * anchor_frame. Baseline holds the constant part of each track's contribution
83  * to the observed movement; it is calculated in the initialization pass, using the
84  * measurement value at the reference frame plus the average contribution to fill
85  * the gap between global anchor_frame and the reference frame for this track.
86  * This struct with private working data is associated with the local call context
87  * via `StabContext::private_track_data`.
88  */
89 typedef struct TrackStabilizationBase {
90         float stabilization_offset_base[2];
91
92         /* measured relative to translated pivot */
93         float stabilization_rotation_base[2][2];
94
95         /* measured relative to translated pivot */
96         float stabilization_scale_base;
97
98         bool is_init_for_stabilization;
99         FCurve *track_weight_curve;
100 } TrackStabilizationBase;
101
102 /* Tracks are reordered for initialization, starting as close as possible to
103  * anchor_frame
104  */
105 typedef struct TrackInitOrder {
106         int sort_value;
107         int reference_frame;
108         MovieTrackingTrack *data;
109 } TrackInitOrder;
110
111 /* Per frame private working data, for accessing possibly animated values. */
112 typedef struct StabContext {
113         MovieClip *clip;
114         MovieTracking *tracking;
115         MovieTrackingStabilization *stab;
116         GHash *private_track_data;
117         FCurve *locinf;
118         FCurve *rotinf;
119         FCurve *scaleinf;
120         FCurve *target_pos[2];
121         FCurve *target_rot;
122         FCurve *target_scale;
123         bool use_animation;
124 } StabContext;
125
126
127 static TrackStabilizationBase *access_stabilization_baseline_data(
128         StabContext *ctx,
129         MovieTrackingTrack *track)
130 {
131         return BLI_ghash_lookup(ctx->private_track_data, track);
132 }
133
134 static void attach_stabilization_baseline_data(
135         StabContext *ctx,
136         MovieTrackingTrack *track,
137         TrackStabilizationBase *private_data)
138 {
139         BLI_ghash_insert(ctx->private_track_data, track, private_data);
140 }
141
142 static void discard_stabilization_baseline_data(void *val)
143 {
144         if (val != NULL) {
145                 MEM_freeN(val);
146         }
147 }
148
149
150 /* == access animated values for given frame == */
151
152 static FCurve *retrieve_stab_animation(MovieClip *clip,
153                                        const char *data_path,
154                                        int idx)
155 {
156         return id_data_find_fcurve(&clip->id,
157                                    &clip->tracking.stabilization,
158                                    &RNA_MovieTrackingStabilization,
159                                    data_path,
160                                    idx,
161                                    NULL);
162 }
163
164 static FCurve *retrieve_track_weight_animation(MovieClip *clip,
165                                                MovieTrackingTrack *track)
166 {
167         return id_data_find_fcurve(&clip->id,
168                                    track,
169                                    &RNA_MovieTrackingTrack,
170                                    "weight_stab",
171                                    0,
172                                    NULL);
173 }
174
175 static float fetch_from_fcurve(FCurve *animationCurve,
176                                int framenr,
177                                StabContext *ctx,
178                                float default_value)
179 {
180         if (ctx && ctx->use_animation && animationCurve) {
181                 int scene_framenr = BKE_movieclip_remap_clip_to_scene_frame(ctx->clip,
182                                                                             framenr);
183                 return evaluate_fcurve(animationCurve, scene_framenr);
184         }
185         return default_value;
186 }
187
188
189 static float get_animated_locinf(StabContext *ctx, int framenr)
190 {
191         return fetch_from_fcurve(ctx->locinf, framenr, ctx, ctx->stab->locinf);
192 }
193
194 static float get_animated_rotinf(StabContext *ctx, int framenr)
195 {
196         return fetch_from_fcurve(ctx->rotinf, framenr, ctx, ctx->stab->rotinf);
197 }
198
199 static float get_animated_scaleinf(StabContext *ctx, int framenr)
200 {
201         return fetch_from_fcurve(ctx->scaleinf, framenr, ctx, ctx->stab->scaleinf);
202 }
203
204 static void get_animated_target_pos(StabContext *ctx,
205                                     int framenr,
206                                     float target_pos[2])
207 {
208         target_pos[0] = fetch_from_fcurve(ctx->target_pos[0],
209                                           framenr,
210                                           ctx,
211                                           ctx->stab->target_pos[0]);
212         target_pos[1] = fetch_from_fcurve(ctx->target_pos[1],
213                                           framenr,
214                                           ctx,
215                                           ctx->stab->target_pos[1]);
216 }
217
218 static float get_animated_target_rot(StabContext *ctx, int framenr)
219 {
220         return fetch_from_fcurve(ctx->target_rot,
221                                  framenr,
222                                  ctx,
223                                  ctx->stab->target_rot);
224 }
225
226 static float get_animated_target_scale(StabContext *ctx, int framenr)
227 {
228         return fetch_from_fcurve(ctx->target_scale, framenr, ctx, ctx->stab->scale);
229 }
230
231 static float get_animated_weight(StabContext *ctx,
232                                  MovieTrackingTrack *track,
233                                  int framenr)
234 {
235         TrackStabilizationBase *working_data =
236                 access_stabilization_baseline_data(ctx, track);
237         if (working_data && working_data->track_weight_curve) {
238                 int scene_framenr = BKE_movieclip_remap_clip_to_scene_frame(ctx->clip,
239                                                                             framenr);
240                 return evaluate_fcurve(working_data->track_weight_curve, scene_framenr);
241         }
242         /* Use weight at global 'current frame' as fallback default. */
243         return track->weight_stab;
244 }
245
246 static void use_values_from_fcurves(StabContext *ctx, bool toggle)
247 {
248         if (ctx != NULL) {
249                 ctx->use_animation = toggle;
250         }
251 }
252
253
254 /* Prepare per call private working area.
255  * Used for access to possibly animated values: retrieve available F-curves.
256  */
257 static StabContext *initialize_stabilization_working_context(MovieClip *clip)
258 {
259         StabContext *ctx = MEM_callocN(sizeof(StabContext),
260                                        "2D stabilization animation runtime data");
261         ctx->clip = clip;
262         ctx->tracking = &clip->tracking;
263         ctx->stab = &clip->tracking.stabilization;
264         ctx->private_track_data = BLI_ghash_ptr_new(
265                  "2D stabilization per track private working data");
266         ctx->locinf        = retrieve_stab_animation(clip, "influence_location", 0);
267         ctx->rotinf        = retrieve_stab_animation(clip, "influence_rotation", 0);
268         ctx->scaleinf      = retrieve_stab_animation(clip, "influence_scale", 0);
269         ctx->target_pos[0] = retrieve_stab_animation(clip, "target_pos", 0);
270         ctx->target_pos[1] = retrieve_stab_animation(clip, "target_pos", 1);
271         ctx->target_rot    = retrieve_stab_animation(clip, "target_rot", 0);
272         ctx->target_scale  = retrieve_stab_animation(clip, "target_zoom", 0);
273         ctx->use_animation = true;
274         return ctx;
275 }
276
277 /* Discard all private working data attached to this call context.
278  * NOTE: We allocate the record for the per track baseline contribution
279  *       locally for each call context (i.e. call to
280  *       BKE_tracking_stabilization_data_get()).
281  *       Thus it is correct to discard all allocations found within the
282  *       corresponding _local_ GHash.
283  */
284 static void discard_stabilization_working_context(StabContext *ctx)
285 {
286         if (ctx != NULL) {
287                 BLI_ghash_free(ctx->private_track_data,
288                                NULL,
289                                discard_stabilization_baseline_data);
290                 MEM_freeN(ctx);
291         }
292 }
293
294 static bool is_init_for_stabilization(StabContext *ctx,
295                                       MovieTrackingTrack *track)
296 {
297         TrackStabilizationBase *working_data =
298                 access_stabilization_baseline_data(ctx, track);
299         return (working_data != NULL && working_data->is_init_for_stabilization);
300 }
301
302 static bool is_usable_for_stabilization(StabContext *ctx,
303                                         MovieTrackingTrack *track)
304 {
305         return (track->flag & TRACK_USE_2D_STAB) &&
306                is_init_for_stabilization(ctx, track);
307 }
308
309 static bool is_effectively_disabled(StabContext *ctx,
310                                     MovieTrackingTrack *track,
311                                     MovieTrackingMarker *marker)
312 {
313         return (marker->flag & MARKER_DISABLED) ||
314                (EPSILON_WEIGHT > get_animated_weight(ctx, track, marker->framenr));
315 }
316
317
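/* Find the index of the last marker at or before the given reference frame,
 * or index 0 if all markers of this track lie beyond ref_frame. The scan
 * starts from the cached track->last_marker index and updates that cache,
 * which keeps consecutive queries on nearby frames cheap.
 */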
318 static int search_closest_marker_index(MovieTrackingTrack *track,
319                                        int ref_frame)
320 {
321         MovieTrackingMarker *markers = track->markers;
322         int end = track->markersnr;
323         int i = track->last_marker;
324
325         i = MAX2(0, i);
326         i = MIN2(i, end - 1);
327         for ( ; i < end - 1 && markers[i].framenr <= ref_frame; ++i);
328         for ( ; 0 < i && markers[i].framenr > ref_frame; --i);
329
330         track->last_marker = i;
331         return i;
332 }
333
334 static void retrieve_next_higher_usable_frame(StabContext *ctx,
335                                               MovieTrackingTrack *track,
336                                               int i,
337                                               int ref_frame,
338                                               int *next_higher)
339 {
340         MovieTrackingMarker *markers = track->markers;
341         int end = track->markersnr;
342         BLI_assert(0 <= i && i < end);
343
344         while (i < end &&
345                (markers[i].framenr < ref_frame ||
346                 is_effectively_disabled(ctx, track, &markers[i])))
347         {
348                 ++i;
349         }
350         if (i < end && markers[i].framenr < *next_higher) {
351                 BLI_assert(markers[i].framenr >= ref_frame);
352                 *next_higher = markers[i].framenr;
353         }
354 }
355
356 static void retrieve_next_lower_usable_frame(StabContext *ctx,
357                                              MovieTrackingTrack *track,
358                                              int i,
359                                              int ref_frame,
360                                              int *next_lower)
361 {
362         MovieTrackingMarker *markers = track->markers;
363         BLI_assert(0 <= i && i < track->markersnr);
364         while (i >= 0 &&
365                (markers[i].framenr > ref_frame ||
366                 is_effectively_disabled(ctx, track, &markers[i])))
367         {
368                 --i;
369         }
370         if (0 <= i && markers[i].framenr > *next_lower) {
371                 BLI_assert(markers[i].framenr <= ref_frame);
372                 *next_lower = markers[i].framenr;
373         }
374 }
375
376 /* Find closest frames with usable stabilization data.
377  * A frame counts as _usable_ when there is at least one track marked for
378  * translation stabilization, which has an enabled tracking marker at this very
379  * frame. We search both for the next lower and next higher position, to allow
380  * the caller to interpolate gaps and to extrapolate at the ends of the
381  * definition range.
382  *
383  * NOTE: Regarding performance, note that the individual tracks will cache the
384  *       last search position.
385  */
386 static void find_next_working_frames(StabContext *ctx,
387                                      int framenr,
388                                      int *next_lower,
389                                      int *next_higher)
390 {
391         for (MovieTrackingTrack *track = ctx->tracking->tracks.first;
392              track != NULL;
393              track = track->next)
394         {
395                 if (is_usable_for_stabilization(ctx, track)) {
396                         int startpoint = search_closest_marker_index(track, framenr);
397                         retrieve_next_higher_usable_frame(ctx,
398                                                           track,
399                                                           startpoint,
400                                                           framenr,
401                                                           next_higher);
402                         retrieve_next_lower_usable_frame(ctx,
403                                                          track,
404                                                          startpoint,
405                                                          framenr,
406                                                          next_lower);
407                 }
408         }
409 }
410
411
412 /* Find active (enabled) marker closest to the reference frame. */
413 static MovieTrackingMarker *get_closest_marker(StabContext *ctx,
414                                                MovieTrackingTrack *track,
415                                                int ref_frame)
416 {
417         int next_lower = MINAFRAME;
418         int next_higher = MAXFRAME;
419         int i = search_closest_marker_index(track, ref_frame);
420         retrieve_next_higher_usable_frame(ctx, track, i, ref_frame, &next_higher);
421         retrieve_next_lower_usable_frame(ctx, track, i, ref_frame, &next_lower);
422
423         if ((next_higher - ref_frame) < (ref_frame - next_lower)) {
424                 return BKE_tracking_marker_get_exact(track, next_higher);
425         }
426         else {
427                 return BKE_tracking_marker_get_exact(track, next_lower);
428         }
429 }
430
431
432 /* Retrieve tracking data, if available and applicable for this frame.
433  * The returned weight value signals the validity; data recorded for this
434  * tracking marker on the exact requested frame is output with the full weight
435  * of this track, while gaps in the data sequence cause the weight to go to zero.
436  */
437 static MovieTrackingMarker *get_tracking_data_point(
438         StabContext *ctx,
439         MovieTrackingTrack *track,
440         int framenr,
441         float *r_weight)
442 {
443         MovieTrackingMarker *marker = BKE_tracking_marker_get_exact(track, framenr);
444         if (marker != NULL && !(marker->flag & MARKER_DISABLED)) {
445                 *r_weight = get_animated_weight(ctx, track, framenr);
446                 return marker;
447         }
448         else {
449                 /* No marker at this frame (=gap) or marker disabled. */
450                 *r_weight = 0.0f;
451                 return NULL;
452         }
453 }
454
455 /* Calculate the contribution of a single track at the time position (frame) of
456  * the given marker. Each track has a local reference frame, which is as close
457  * as possible to the global anchor_frame. Thus the translation contribution is
458  * comprised of the offset relative to the image position at that reference
459  * frame, plus a guess of the contribution for the time span between the
460  * anchor_frame and the local reference frame of this track. The constant part
461  * of this contribution is precomputed initially. At the anchor_frame, by
462  * definition the contribution of all tracks is zero, keeping the frame in place.
463  *
464  * track_ref is per track baseline contribution at reference frame; filled in at
465  *           initialization
466  * marker is tracking data to use as contribution for current frame.
467  * result_offset is a total cumulated contribution of this track,
468  *               relative to the stabilization anchor_frame,
469  *               in normalized (0...1) coordinates.
470  */
471 static void translation_contribution(TrackStabilizationBase *track_ref,
472                                      MovieTrackingMarker *marker,
473                                      float result_offset[2])
474 {
475         add_v2_v2v2(result_offset,
476                     track_ref->stabilization_offset_base,
477                     marker->pos);
478 }
479
480 /* Similar to the ::translation_contribution(), the rotation contribution is
481  * comprised of the contribution by this individual track, and the averaged
482  * contribution from anchor_frame to the ref point of this track.
483  * - Contribution is in terms of angles, -pi < angle < +pi, and all averaging
484  *   happens in this domain.
485  * - Yet the actual measurement happens as vector between pivot and the current
486  *   tracking point
487  * - Currently we use the center of frame as approximation for the rotation pivot
488  *   point.
489  * - Moreover, the pivot point has to be compensated for the already determined
490  *   shift offset, in order to get the pure rotation around the pivot.
491  *   To turn this into a _contribution_, the likewise corrected angle at the
492  *   reference frame has to be subtracted, to get only the pure angle difference
493  *   this tracking point has captured.
494  * - To get from vectors to angles, we have to go through an arctangent,
495  *   which involves the issue of the definition range: the resulting angles will
496  *   flip by 360deg when the measured vector passes from the second to the third
497  *   quadrant, thus messing up the average calculation. Since _any_ tracking
498  *   point might be used, these problems are quite common in practice.
499  * - Thus we perform the subtraction of the reference and the addition of the
500  *   baseline contribution in polar coordinates as simple addition of angles;
501  *   since these parts are fixed, we can bake them into a rotation matrix.
502  *   With this approach, the border of the arctangent definition range will
503  *   be reached only when the _whole_ contribution approaches +/- 180deg,
504  *   meaning we've already tilted the frame upside down. This situation is far
505  *   less common and can be tolerated.
506  * - As an additional feature, when activated, changes in image scale relative
507  *   to the rotation center can also be picked up. To handle those values in
508  *   the same framework, we average the scales as logarithms (sketch below).
509  *
510  * aspect is the total aspect ratio of the undistorted image (includes frame and
511  * pixel aspect).
512  */
513 static void rotation_contribution(TrackStabilizationBase *track_ref,
514                                   MovieTrackingMarker *marker,
515                                   float aspect,
516                                   float target_pos[2],
517                                   float averaged_translation_contribution[2],
518                                   float *result_angle,
519                                   float *result_scale)
520 {
521         float len;
522         float pos[2];
523         float pivot[2];
524         copy_v2_fl(pivot, 0.5f);  /* Use center of frame as hard wired pivot. */
525         add_v2_v2(pivot, averaged_translation_contribution);
526         sub_v2_v2(pivot, target_pos);
527         sub_v2_v2v2(pos, marker->pos, pivot);
528
529         pos[0] *= aspect;
530         mul_m2v2(track_ref->stabilization_rotation_base, pos);
531
532         *result_angle = atan2f(pos[1], pos[0]);
533
534         len = len_v2(pos) + SCALE_ERROR_LIMIT_BIAS;
535         *result_scale = len * track_ref->stabilization_scale_base;
536         BLI_assert(0.0 < *result_scale);
537 }
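/* Illustrative sketch (kept disabled, not used by the surrounding code):
 * averaging scale factors in the log domain -- average_track_contributions()
 * below accumulates logf(scale) and stabilization_calculate_data() maps the
 * result back with expf() -- amounts to a weighted geometric mean. Two equally
 * weighted measurements of 2.0 and 0.5 then cancel out to 1.0, instead of the
 * 1.25 an arithmetic mean would give. The helper name is hypothetical;
 * logf()/expf() come from <math.h>, which is already available here via
 * BLI_math.h.
 */
#if 0
static float example_weighted_geometric_mean(const float *scales,
                                             const float *weights,
                                             int count)
{
        float log_sum = 0.0f, weight_sum = 0.0f;
        int i;
        for (i = 0; i < count; i++) {
                log_sum += logf(scales[i]) * weights[i];
                weight_sum += weights[i];
        }
        /* Map the averaged logarithm back to an ordinary scale factor. */
        return (weight_sum > 0.0f) ? expf(log_sum / weight_sum) : 1.0f;
}
#endif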
538
539
540 /* Weighted average of the per track cumulated contributions at given frame.
541  * Returns true if all desired calculations could be done and all averages are
542  * available.
543  *
544  * NOTE: Even if the result is not `true`, the returned translation and angle
545  *       are always sensible and as good as can be. Especially in the
546  *       initialization phase we might not be able to get any average (yet) or
547  *       get only a translation value. Since initialization visits tracks in a
548  *       specific order, starting from anchor_frame, the result is logically
549  *       correct nonetheless. But under normal operating conditions,
550  *       a result of `false` should disable the stabilization function.
551  */
552 static bool average_track_contributions(StabContext *ctx,
553                                         int framenr,
554                                         float aspect,
555                                         float r_translation[2],
556                                         float *r_angle,
557                                         float *r_scale_step)
558 {
559         bool ok;
560         float weight_sum;
561         MovieTrackingTrack *track;
562         MovieTracking *tracking = ctx->tracking;
563         MovieTrackingStabilization *stab = &tracking->stabilization;
564         BLI_assert(stab->flag & TRACKING_2D_STABILIZATION);
565
566         zero_v2(r_translation);
567         *r_scale_step = 0.0f;  /* logarithm */
568         *r_angle = 0.0f;
569
570         ok = false;
571         weight_sum = 0.0f;
572         for (track = tracking->tracks.first; track; track = track->next) {
573                 if (!is_init_for_stabilization(ctx, track)) {
574                         continue;
575                 }
576                 if (track->flag & TRACK_USE_2D_STAB) {
577                         float weight = 0.0f;
578                         MovieTrackingMarker *marker = get_tracking_data_point(ctx,
579                                                                               track,
580                                                                               framenr,
581                                                                               &weight);
582                         if (marker) {
583                                 TrackStabilizationBase *stabilization_base =
584                                        access_stabilization_baseline_data(ctx, track);
585                                 BLI_assert(stabilization_base != NULL);
586                                 float offset[2];
587                                 weight_sum += weight;
588                                 translation_contribution(stabilization_base, marker, offset);
589                                 mul_v2_fl(offset, weight);
590                                 add_v2_v2(r_translation, offset);
591                                 ok |= (weight_sum > EPSILON_WEIGHT);
592                         }
593                 }
594         }
595         if (!ok) {
596                 return false;
597         }
598
599         r_translation[0] /= weight_sum;
600         r_translation[1] /= weight_sum;
601
602         if (!(stab->flag & TRACKING_STABILIZE_ROTATION)) {
603                 return ok;
604         }
605
606         ok = false;
607         weight_sum = 0.0f;
608         for (track = tracking->tracks.first; track; track = track->next) {
609                 if (!is_init_for_stabilization(ctx, track)) {
610                         continue;
611                 }
612                 if (track->flag & TRACK_USE_2D_STAB_ROT) {
613                         float weight = 0.0f;
614                         MovieTrackingMarker *marker = get_tracking_data_point(ctx,
615                                                                               track,
616                                                                               framenr,
617                                                                               &weight);
618                         if (marker) {
619                                 TrackStabilizationBase *stabilization_base =
620                                         access_stabilization_baseline_data(ctx, track);
621                                 BLI_assert(stabilization_base != NULL);
622                                 float rotation, scale;
623                                 float target_pos[2];
624                                 weight_sum += weight;
625                                 get_animated_target_pos(ctx, framenr, target_pos);
626                                 rotation_contribution(stabilization_base,
627                                                       marker,
628                                                       aspect,
629                                                       target_pos,
630                                                       r_translation,
631                                                       &rotation,
632                                                       &scale);
633                                 *r_angle += rotation * weight;
634                                 if (stab->flag & TRACKING_STABILIZE_SCALE) {
635                                         *r_scale_step += logf(scale) * weight;
636                                 }
637                                 else {
638                                         *r_scale_step = 0;
639                                 }
640                                 ok |= (weight_sum > EPSILON_WEIGHT);
641                         }
642                 }
643         }
644         if (ok) {
645                 *r_scale_step /= weight_sum;
646                 *r_angle /= weight_sum;
647         }
648         else {
649                 /* We reach this point because translation could be calculated,
650                  * but rotation/scale found no data to work on.
651                  */
652                 *r_scale_step = 0.0f;
653                 *r_angle = 0.0f;
654         }
655         return true;
656 }
657
658
659 /* Linear interpolation of data retrieved at two measurement points.
660  * This function is used to fill gaps in the middle of the covered area,
661  * at frames without any usable tracks for stabilization.
662  *
663  * framenr is a position to interpolate for.
664  * frame_a is a valid measurement point below framenr
665  * frame_b is a valid measurement point above framenr
666  * Returns true if both measurements could actually be retrieved.
667  * Otherwise the output parameters remain unaltered.
668  */
669 static bool interpolate_averaged_track_contributions(StabContext *ctx,
670                                                      int framenr,
671                                                      int frame_a,
672                                                      int frame_b,
673                                                      float aspect,
674                                                      float translation[2],
675                                                      float *r_angle,
676                                                      float *r_scale_step)
677 {
678         float t, s;
679         float trans_a[2], trans_b[2];
680         float angle_a, angle_b;
681         float scale_a, scale_b;
682         bool success = false;
683
684         BLI_assert(frame_a <= frame_b);
685         BLI_assert(frame_a <= framenr);
686         BLI_assert(framenr <= frame_b);
687
688         t = ((float)framenr - frame_a) / (frame_b - frame_a);
689         s = 1.0f - t;
690
691         success = average_track_contributions(ctx, frame_a, aspect, trans_a, &angle_a, &scale_a);
692         if (!success) {
693                 return false;
694         }
695         success = average_track_contributions(ctx, frame_b, aspect, trans_b, &angle_b, &scale_b);
696         if (!success) {
697                 return false;
698         }
699
700         interp_v2_v2v2(translation, trans_a, trans_b, t);
701         *r_scale_step = s * scale_a + t * scale_b;
702         *r_angle = s * angle_a + t * angle_b;
703         return true;
704 }
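/* The interpolation above blends each component in the domain it was averaged
 * in: translation in normalized frame coordinates, angle in radians and
 * scale_step in log scale. A linear blend of scale_step thus corresponds to a
 * geometric blend of the underlying scale factors.
 */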
705
706
707 /* Reorder tracks starting with those providing a tracking data frame
708  * closest to the global anchor_frame. Tracks with a gap at anchor_frame or
709  * starting farther away from anchor_frame altogether will be visited later.
710  * This allows building up the baseline contributions incrementally.
711  *
712  * order is an array for sorting the tracks. Must be of suitable size to hold
713  * all tracks.
714  * Returns the number of actually usable tracks, which can be less than the
715  * overall number of tracks.
716  *
717  * NOTE: After returning, the order array holds entries up to the number of
718  *       usable tracks, appropriately sorted starting with the closest tracks.
719  *       Initialization includes disabled tracks, since they might be enabled
720  *       through automation later.
721  */
722 static int establish_track_initialization_order(StabContext *ctx,
723                                                 TrackInitOrder *order)
724 {
725         size_t tracknr = 0;
726         MovieTrackingTrack *track;
727         MovieTracking *tracking = ctx->tracking;
728         int anchor_frame = tracking->stabilization.anchor_frame;
729
730         for (track = tracking->tracks.first; track != NULL; track = track->next) {
731                 MovieTrackingMarker *marker;
732                 order[tracknr].data = track;
733                 marker = get_closest_marker(ctx, track, anchor_frame);
734                 if (marker != NULL &&
735                         (track->flag & (TRACK_USE_2D_STAB | TRACK_USE_2D_STAB_ROT)))
736                 {
737                         order[tracknr].sort_value = abs(marker->framenr - anchor_frame);
738                         order[tracknr].reference_frame = marker->framenr;
739                         ++tracknr;
740                 }
741         }
742         if (tracknr) {
743                 qsort(order, tracknr, sizeof(TrackInitOrder), BLI_sortutil_cmp_int);
744         }
745         return tracknr;
746 }
747
748
749 /* Setup the constant part of this track's contribution to the determined frame
750  * movement. Tracks usually don't provide tracking data for every frame. Thus,
751  * for determining data at a given frame, we split up the contribution into a
752  * part covered by actual measurements on this track, and the initial gap
753  * between this track's reference frame and the global anchor_frame.
754  * The (missing) data for the gap can be substituted by the average offset
755  * observed by the other tracks covering the gap. This approximation doesn't
756  * introduce wrong data, but it records data with incorrect weight. A totally
757  * correct solution would require us to average the contribution per frame, and
758  * then integrate stepwise over all frames -- which of course would be way more
759  * expensive, especially for longer clips. By contrast, our solution
760  * cumulates the total contribution per track and averages afterwards over all
761  * tracks; it can thus be calculated just based on the data of a single frame,
762  * plus the "baseline" for the reference frame, which is what we are computing
763  * here.
764  *
765  * Since we're averaging _contributions_, we have to calculate the _difference_
766  * of the measured position at current frame and the position at the reference
767  * frame. But the "reference" part of this difference is constant and can thus
768  * be packed together with the baseline contribution into a single precomputed
769  * vector per track.
770  *
771  * In case of the rotation contribution, the principle is the same, but we have
772  * to compensate for the already determined translation and measure the pure
773  * rotation, simply because this is how we model the offset: shift plus rotation
774  * around the shifted rotation center. To circumvent problems with the
775  * definition range of the arctangent function, we perform this baseline
776  * addition and reference angle subtraction in polar coordinates and bake this
777  * operation into a precomputed rotation matrix.
778  *
779  * track is the track to be initialized.
780  * reference_frame is a local frame for this track, the closest pick to the
781  *                 global anchor_frame.
782  * aspect is the total aspect ratio of the undistorted image (includes frame and
783  *        pixel aspect).
784  * target_pos is a possibly animated target position as set by the user for
785  *            the reference_frame
786  * average_translation is a value observed by the _other_ tracks for the gap
787  *                     between reference_frame and anchor_frame. This
788  *                     average must not contain contributions of frames
789  *                     not yet initialized
790  * average_angle is, in a similar way, the rotation value observed by the
791  *               _other_ tracks.
792  * average_scale_step is an image scale factor observed on average by the other
793  *                    tracks for this frame. This value is recorded and
794  *                    averaged as logarithm. The recorded scale changes
795  *                    are damped for very small contributions, to limit
796  *                    the effect of probe points approaching the pivot
797  *                    too closely.
798  *
799  * NOTE: when done, this track is marked as initialized
800  */
801 static void initialize_track_for_stabilization(StabContext *ctx,
802                                                MovieTrackingTrack *track,
803                                                int reference_frame,
804                                                float aspect,
805                                                const float target_pos[2],
806                                                const float average_translation[2],
807                                                float average_angle,
808                                                float average_scale_step)
809 {
810         float pos[2], angle, len;
811         float pivot[2];
812         TrackStabilizationBase *local_data =
813                 access_stabilization_baseline_data(ctx, track);
814         MovieTrackingMarker *marker =
815                 BKE_tracking_marker_get_exact(track, reference_frame);
816         /* Logic for initialization order ensures there *is* a marker on that
817          * very frame.
818          */
819         BLI_assert(marker != NULL);
820         BLI_assert(local_data != NULL);
821
822         /* Per track baseline value for translation. */
823         sub_v2_v2v2(local_data->stabilization_offset_base,
824                     average_translation,
825                     marker->pos);
826
827         /* Per track baseline value for rotation. */
828         copy_v2_fl(pivot, 0.5f);  /* Use center of frame as hard wired pivot. */
829         add_v2_v2(pivot, average_translation);
830         sub_v2_v2(pivot, target_pos);
831         sub_v2_v2v2(pos, marker->pos, pivot);
832
833         pos[0] *= aspect;
834         angle = average_angle - atan2f(pos[1], pos[0]);
835         rotate_m2(local_data->stabilization_rotation_base, angle);
836
837         /* Per track baseline value for zoom. */
838         len = len_v2(pos) + SCALE_ERROR_LIMIT_BIAS;
839         local_data->stabilization_scale_base = expf(average_scale_step) / len;
840
841         local_data->is_init_for_stabilization = true;
842 }
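/* Worked example of how this baseline combines with later measurements
 * (compare translation_contribution() above):
 *
 *     offset_base     = average_translation(ref) - pos(ref)
 *     contribution(t) = offset_base + pos(t)
 *                     = average_translation(ref) + (pos(t) - pos(ref))
 *
 * i.e. the offset this track has observed since its own reference frame,
 * plus the average offset the other tracks observed between anchor_frame and
 * that reference frame. At the anchor_frame this reduces to zero, as noted
 * above translation_contribution().
 */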
843
844
845 static void initialize_all_tracks(StabContext *ctx, float aspect)
846 {
847         size_t i, track_cnt = 0;
848         MovieClip *clip = ctx->clip;
849         MovieTracking *tracking = ctx->tracking;
850         MovieTrackingTrack *track;
851         TrackInitOrder *order;
852
853         /* Attempt to start initialization at anchor_frame.
854          * By definition, offset contribution is zero there.
855          */
856         int reference_frame = tracking->stabilization.anchor_frame;
857         float average_angle = 0.0f, average_scale_step = 0.0f;
858         float average_translation[2];
859         float target_pos_at_ref_frame[2];
860         zero_v2(target_pos_at_ref_frame);
861         zero_v2(average_translation);
862
863         /* Initialize private working data. */
864         for (track = tracking->tracks.first; track != NULL; track = track->next) {
865                 TrackStabilizationBase *local_data =
866                         access_stabilization_baseline_data(ctx, track);
867                 if (!local_data) {
868                         local_data = MEM_callocN(sizeof(TrackStabilizationBase),
869                                                  "2D stabilization per track baseline data");
870                         attach_stabilization_baseline_data(ctx, track, local_data);
871                 }
872                 BLI_assert(local_data != NULL);
873                 local_data->track_weight_curve = retrieve_track_weight_animation(clip,
874                                                                                  track);
875                 local_data->is_init_for_stabilization = false;
876
877                 ++track_cnt;
878         }
879         if (!track_cnt) {
880                 return;
881         }
882
883         order = MEM_mallocN(track_cnt * sizeof(TrackInitOrder),
884                             "stabilization track order");
885         if (!order) {
886                 return;
887         }
888
889         track_cnt = establish_track_initialization_order(ctx, order);
890         if (track_cnt == 0) {
891                 goto cleanup;
892         }
893
894         for (i = 0; i < track_cnt; ++i) {
895                 track = order[i].data;
896                 if (reference_frame != order[i].reference_frame) {
897                         reference_frame = order[i].reference_frame;
898                         average_track_contributions(ctx,
899                                                     reference_frame,
900                                                     aspect,
901                                                     average_translation,
902                                                     &average_angle,
903                                                     &average_scale_step);
904                         get_animated_target_pos(ctx,
905                                                 reference_frame,
906                                                 target_pos_at_ref_frame);
907                 }
908                 initialize_track_for_stabilization(ctx,
909                                                    track,
910                                                    reference_frame,
911                                                    aspect,
912                                                    target_pos_at_ref_frame,
913                                                    average_translation,
914                                                    average_angle,
915                                                    average_scale_step);
916         }
917
918 cleanup:
919         MEM_freeN(order);
920 }
921
922
923 /* Retrieve the measurement of frame movement by averaging contributions of
924  * active tracks.
925  *
926  * translation is a measurement in normalized 0..1 coordinates.
927  * angle is a measurement in radians -pi..+pi, counter-clockwise relative to
928  *       translation compensated frame center
929  * scale_step is a measurement of image scale changes, in logarithmic scale
930  *            (zero means scale == 1)
931  * Returns true if calculation is enabled and all data retrieved as expected.
932  *
933  * NOTE: when returning `false`, output parameters are reset to neutral values.
934  */
935 static bool stabilization_determine_offset_for_frame(StabContext *ctx,
936                                                      int framenr,
937                                                      float aspect,
938                                                      float r_translation[2],
939                                                      float *r_angle,
940                                                      float *r_scale_step)
941 {
942         bool success = false;
943
944         /* Early output if stabilization is disabled. */
945         if ((ctx->stab->flag & TRACKING_2D_STABILIZATION) == 0) {
946                 zero_v2(r_translation);
947                 *r_scale_step = 0.0f;
948                 *r_angle = 0.0f;
949                 return false;
950         }
951
952         success = average_track_contributions(ctx,
953                                               framenr,
954                                               aspect,
955                                               r_translation,
956                                               r_angle,
957                                               r_scale_step);
958         if (!success) {
959                 /* Try to hold extrapolated settings beyond the definition range
960                  * and to interpolate in gaps without any usable tracking data
961                  * to prevent a sudden jump to the image zero position.
962                  */
963                 int next_lower = MINAFRAME;
964                 int next_higher = MAXFRAME;
965                 use_values_from_fcurves(ctx, true);
966                 find_next_working_frames(ctx, framenr, &next_lower, &next_higher);
967                 if (next_lower >= MINFRAME && next_higher < MAXFRAME) {
968                         success = interpolate_averaged_track_contributions(ctx,
969                                                                            framenr,
970                                                                            next_lower,
971                                                                            next_higher,
972                                                                            aspect,
973                                                                            r_translation,
974                                                                            r_angle,
975                                                                            r_scale_step);
976                 }
977                 else if (next_higher < MAXFRAME) {
978                         /* Before start of stabilized range: extrapolate start point
979                          * settings.
980                          */
981                         success = average_track_contributions(ctx,
982                                                               next_higher,
983                                                               aspect,
984                                                               r_translation,
985                                                               r_angle,
986                                                               r_scale_step);
987                 }
988                 else if (next_lower >= MINFRAME) {
989                         /* After end of stabilized range: extrapolate end point settings. */
990                         success = average_track_contributions(ctx,
991                                                               next_lower,
992                                                               aspect,
993                                                               r_translation,
994                                                               r_angle,
995                                                               r_scale_step);
996                 }
997                 use_values_from_fcurves(ctx, false);
998         }
999         return success;
1000 }
1001
1002 /* Calculate stabilization data (translation, scale and rotation) from given raw
1003  * measurements. Result is in absolute image dimensions (expanded image, square
1004  * pixels), includes automatic or manual scaling and compensates for a target
1005  * frame position, if given.
1006  *
1007  * size is the height of the expanded image in pixels; the width is size * aspect.
1008  * aspect is a ratio (width / height) of the effective canvas (square pixels).
1009  * do_compensate denotes whether to actually output values necessary to
1010  *               _compensate_ the determined frame movement.
1011  *               Otherwise, the effective target movement is returned.
1012  */
1013 static void stabilization_calculate_data(StabContext *ctx,
1014                                          int framenr,
1015                                          int size,
1016                                          float aspect,
1017                                          bool do_compensate,
1018                                          float scale_step,
1019                                          float r_translation[2],
1020                                          float *r_scale,
1021                                          float *r_angle)
1022 {
1023         float target_pos[2], target_scale;
1024         float scaleinf = get_animated_scaleinf(ctx, framenr);
1025
1026         if (ctx->stab->flag & TRACKING_STABILIZE_SCALE) {
1027                 *r_scale = expf(scale_step * scaleinf);  /* Averaged in log scale */
1028         } else {
1029                 *r_scale = 1.0f;
1030         }
1031
1032         mul_v2_fl(r_translation, get_animated_locinf(ctx, framenr));
1033         *r_angle *= get_animated_rotinf(ctx, framenr);
1034
1035         /* Compensate for a target frame position.
1036  * This allows following tracking / panning shots in a semi-manual fashion,
1037          * when animating the settings for the target frame position.
1038          */
1039         get_animated_target_pos(ctx, framenr, target_pos);
1040         sub_v2_v2(r_translation, target_pos);
1041         *r_angle -= get_animated_target_rot(ctx, framenr);
1042         target_scale = get_animated_target_scale(ctx, framenr);
1043         if (target_scale != 0.0f) {
1044                 *r_scale /= target_scale;
1045                 /* target_scale is an expected/intended reference zoom value. */
1046         }
1047
1048         /* Convert from relative to absolute coordinates, square pixels. */
1049         r_translation[0] *= (float)size * aspect;
1050         r_translation[1] *= (float)size;
1051
1052         /* Output measured data, or inverse of the measured values for
1053          * compensation?
1054          */
1055         if (do_compensate) {
1056                 mul_v2_fl(r_translation, -1.0f);
1057                 *r_angle *= -1.0f;
1058                 if (*r_scale != 0.0f) {
1059                         *r_scale = 1.0f / *r_scale;
1060                 }
1061         }
1062 }
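/* Example with made-up numbers (assuming all influence values are 1.0 and no
 * target position / rotation / zoom is set): if the measured movement at a
 * frame amounts to a drift of (+12, -3) pixels, a roll of +2 degrees and a
 * scale of 1.05, then with do_compensate enabled the function above returns
 * (-12, +3) pixels, -2 degrees and 1/1.05, i.e. the transform that moves the
 * image back to its stabilized position.
 */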
1063
1064
1065 /* Determine the inner part of the frame, which is always safe to use.
1066  * When enlarging the image by the inverse of this factor, any black areas
1067  * appearing due to frame translation and rotation will be removed.
1068  *
1069  * NOTE: When calling this function, basic initialization of tracks must be
1070  *       done already
1071  */
1072 static void stabilization_determine_safe_image_area(StabContext *ctx,
1073                                                     int size,
1074                                                     float image_aspect)
1075 {
1076         MovieTrackingStabilization *stab = ctx->stab;
1077         float pixel_aspect = ctx->tracking->camera.pixel_aspect;
1078
1079         int sfra = INT_MAX, efra = INT_MIN, cfra;
1080         float scale = 1.0f, scale_step = 0.0f;
1081         MovieTrackingTrack *track;
1082         stab->scale = 1.0f;
1083
1084         /* Calculate maximal frame range of tracks where stabilization is active. */
1085         for (track = ctx->tracking->tracks.first; track; track = track->next) {
1086                 if ((track->flag & TRACK_USE_2D_STAB) ||
1087                         ((stab->flag & TRACKING_STABILIZE_ROTATION) &&
1088                          (track->flag & TRACK_USE_2D_STAB_ROT)))
1089                 {
1090                         int first_frame = track->markers[0].framenr;
1091                         int last_frame  = track->markers[track->markersnr - 1].framenr;
1092                         sfra = min_ii(sfra, first_frame);
1093                         efra = max_ii(efra, last_frame);
1094                 }
1095         }
1096
1097         /* For every frame we calculate the scale factor needed to eliminate the black
1098          * border area, and choose the largest scale factor as the final one.
1099          */
1100         for (cfra = sfra; cfra <= efra; cfra++) {
1101                 float translation[2], angle, tmp_scale;
1102                 int i;
1103                 float mat[4][4];
1104                 float points[4][2] = {{0.0f, 0.0f},
1105                                       {0.0f, size},
1106                                       {image_aspect * size, size},
1107                                       {image_aspect * size, 0.0f}};
1108                 float si, co;
1109                 bool do_compensate = true;
1110
1111                 stabilization_determine_offset_for_frame(ctx,
1112                                                          cfra,
1113                                                          image_aspect,
1114                                                          translation,
1115                                                          &angle,
1116                                                          &scale_step);
1117                 stabilization_calculate_data(ctx,
1118                                              cfra,
1119                                              size,
1120                                              image_aspect,
1121                                              do_compensate,
1122                                              scale_step,
1123                                              translation,
1124                                              &tmp_scale,
1125                                              &angle);
1126
1127                 BKE_tracking_stabilization_data_to_mat4(size * image_aspect,
1128                                                         size,
1129                                                         pixel_aspect,
1130                                                         translation,
1131                                                         1.0f,
1132                                                         angle,
1133                                                         mat);
1134
1135                 si = sinf(angle);
1136                 co = cosf(angle);
1137
1138                 /* Investigate the transformed border lines for this frame;
1139                  * find out where they cut the original frame.
1140                  */
1141                 for (i = 0; i < 4; i++) {
1142                         int j;
1143                         float a[3] = {0.0f, 0.0f, 0.0f},
1144                               b[3] = {0.0f, 0.0f, 0.0f};
1145
1146                         copy_v2_v2(a, points[i]);
1147                         copy_v2_v2(b, points[(i + 1) % 4]);
1148                         a[2] = b[2] = 0.0f;
1149
1150                         mul_m4_v3(mat, a);
1151                         mul_m4_v3(mat, b);
1152
1153                         for (j = 0; j < 4; j++) {
1154                                 float point[3] = {points[j][0], points[j][1], 0.0f};
1155                                 float v1[3], v2[3];
1156
1157                                 sub_v3_v3v3(v1, b, a);
1158                                 sub_v3_v3v3(v2, point, a);
1159
1160                                 if (cross_v2v2(v1, v2) >= 0.0f) {
1161                                         const float rot_dx[4][2] = {{1.0f, 0.0f},
1162                                                                     {0.0f, -1.0f},
1163                                                                     {-1.0f, 0.0f},
1164                                                                     {0.0f, 1.0f}};
1165                                         const float rot_dy[4][2] = {{0.0f, 1.0f},
1166                                                                     {1.0f, 0.0f},
1167                                                                     {0.0f, -1.0f},
1168                                                                     {-1.0f, 0.0f}};
1169
1170                                         float dx = translation[0] * rot_dx[j][0] +
1171                                                    translation[1] * rot_dx[j][1],
1172                                               dy = translation[0] * rot_dy[j][0] +
1173                                                    translation[1] * rot_dy[j][1];
1174
1175                                         float w, h, E, F, G, H, I, J, K, S;
1176
1177                                         if (j % 2) {
1178                                                 w = (float)size / 2.0f;
1179                                                 h = image_aspect * size / 2.0f;
1180                                         }
1181                                         else {
1182                                                 w = image_aspect * size / 2.0f;
1183                                                 h = (float)size / 2.0f;
1184                                         }
1185
1186                                         E = -w * co + h * si;
1187                                         F = -h * co - w * si;
1188
1189                                         if ((i % 2) == (j % 2)) {
1190                                                 G = -w * co - h * si;
1191                                                 H = h * co - w * si;
1192                                         }
1193                                         else {
1194                                                 G = w * co + h * si;
1195                                                 H = -h * co + w * si;
1196                                         }
1197
1198                                         I = F - H;
1199                                         J = G - E;
1200                                         K = G * F - E * H;
1201
1202                                         S = (-w * I - h * J) / (dx * I + dy * J + K);
1203
1204                                         scale = max_ff(scale, S);
1205                                 }
1206                         }
1207                 }
1208         }
1209
1210         stab->scale = scale;
1211
1212         if (stab->maxscale > 0.0f) {
1213                 stab->scale = min_ff(stab->scale, stab->maxscale);
1214         }
1215 }
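
/* Illustrative reading of the autoscale formula above (a sketch, not a formal
 * derivation): for every pair of original frame corner and transformed frame
 * edge, S is the scale factor at which that corner comes to lie exactly on
 * the edge; taking the maximum over all pairs yields the smallest uniform
 * scale for which the translated/rotated image still covers the whole frame.
 * For the special case angle == 0 and a pure horizontal compensation shift dx
 * towards an edge, the expression collapses to
 *
 *     S = w / (w - dx);   // w: half width of the frame, in the same units as dx
 *
 * so the larger the shift, the more the image has to be scaled up to avoid an
 * uncovered border.
 */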
1216
1217
1218 /* Prepare working data and determine reference point for each track.
1219  *
1220  * NOTE: These calculations _could_ be cached and reused for all frames of the
1221  *       same clip. However, since proper initialization depends on (weight)
1222  *       animation and setup of tracks, ensuring consistency of cached init data
1223  *       turns out to be tricky, hard to maintain and generally not worth the
1224  *       effort. Thus we'll re-initialize on every frame.
1225  */
1226 static StabContext *init_stabilizer(MovieClip *clip, int width, int height)
1227 {
1228         MovieTracking *tracking = &clip->tracking;
1229         MovieTrackingStabilization *stab = &tracking->stabilization;
1230         float pixel_aspect = tracking->camera.pixel_aspect;
1231         float aspect = (float)width * pixel_aspect / height;
1232         int size = height;
1233
1234         StabContext *ctx = initialize_stabilization_working_context(clip);
1235         BLI_assert(ctx != NULL);
1236         initialize_all_tracks(ctx, aspect);
1237         if (stab->flag & TRACKING_AUTOSCALE) {
1238                 stabilization_determine_safe_image_area(ctx, size, aspect);
1239         }
1240         /* By default, just use values for the global current frame. */
1241         use_values_from_fcurves(ctx, false);
1242         return ctx;
1243 }
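
/* Illustrative example (hypothetical numbers): for a 1920x1080 clip with
 * square pixels (pixel_aspect = 1.0) the setup above yields
 *
 *     aspect = 1920 * 1.0 / 1080 = 1.78 (approx.)
 *     size   = 1080
 *
 * i.e. the frame height serves as the reference size, while the aspect factor
 * accounts for the horizontal extent of the frame.
 */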
1244
1245
1246 /* === public interface functions === */
1247
1248 /* Get stabilization data (translation, scaling and angle) for a given frame.
1249  * The returned data describes how to compensate for the detected movement, with
1250  * any chosen scale factor already applied and any target frame position already
1251  * compensated. In case stabilization fails or is disabled, neutral values are
1252  * returned.
1253  *
1254  * framenr is a frame number, relative to the clip (not relative to the scene
1255  *         timeline).
1256  * width is the effective width of the canvas (square pixels), used to scale the
1257  *       determined translation.
1258  *
1259  * Outputs:
1260  * - translation: the lateral shift, in absolute canvas coordinates
1261  *   (square pixels).
1262  * - scale: the scale factor to apply.
1263  * - angle: the rotation angle, relative to the frame center.
1264  */
1265 /* TODO(sergey): Use r_ prefix for output parameters here. */
1266 void BKE_tracking_stabilization_data_get(MovieClip *clip,
1267                                          int framenr,
1268                                          int width,
1269                                          int height,
1270                                          float translation[2],
1271                                          float *scale,
1272                                          float *angle)
1273 {
1274         StabContext *ctx = NULL;
1275         MovieTracking *tracking = &clip->tracking;
1276         bool enabled = (tracking->stabilization.flag & TRACKING_2D_STABILIZATION);
1277         /* Might become a parameter of a stabilization compositor node. */
1278         bool do_compensate = true;
1279         float scale_step = 0.0f;
1280         float pixel_aspect = tracking->camera.pixel_aspect;
1281         float aspect = (float)width * pixel_aspect / height;
1282         int size = height;
1283
1284         if (enabled) {
1285                 ctx = init_stabilizer(clip, width, height);
1286         }
1287
1288         if (enabled &&
1289                 stabilization_determine_offset_for_frame(ctx,
1290                                                          framenr,
1291                                                          aspect,
1292                                                          translation,
1293                                                          angle,
1294                                                          &scale_step))
1295         {
1296                 stabilization_calculate_data(ctx,
1297                                              framenr,
1298                                              size,
1299                                              aspect,
1300                                              do_compensate,
1301                                              scale_step,
1302                                              translation,
1303                                              scale,
1304                                              angle);
1305         }
1306         else {
1307                 zero_v2(translation);
1308                 *scale = 1.0f;
1309                 *angle = 0.0f;
1310         }
1311         discard_stabilization_working_context(ctx);
1312 }
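
/* Minimal usage sketch (illustrative; assumes a valid MovieClip and a frame
 * number already converted to clip space):
 *
 *     float translation[2], scale, angle;
 *     BKE_tracking_stabilization_data_get(clip, framenr, width, height,
 *                                         translation, &scale, &angle);
 *     // translation is in pixels of the given width/height;
 *     // scale == 1.0f and angle == 0.0f when stabilization is disabled.
 */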
1313
1314 /* Stabilize given image buffer using stabilization data for a specified
1315  * frame number.
1316  *
1317  * NOTE: frame number should be in clip space, not scene space.
1318  */
1319 /* TODO(sergey): Use r_ prefix for output parameters here. */
1320 ImBuf *BKE_tracking_stabilize_frame(MovieClip *clip,
1321                                     int framenr,
1322                                     ImBuf *ibuf,
1323                                     float translation[2],
1324                                     float *scale,
1325                                     float *angle)
1326 {
1327         float tloc[2], tscale, tangle;
1328         MovieTracking *tracking = &clip->tracking;
1329         MovieTrackingStabilization *stab = &tracking->stabilization;
1330         ImBuf *tmpibuf;
1331         int width = ibuf->x, height = ibuf->y;
1332         float pixel_aspect = tracking->camera.pixel_aspect;
1333         float mat[4][4];
1334         int j, filter = tracking->stabilization.filter;
1335         void (*interpolation)(struct ImBuf *, struct ImBuf *, float, float, int, int) = NULL;
1336         int ibuf_flags;
1337
1338         if (translation)
1339                 copy_v2_v2(tloc, translation);
1340
1341         if (scale)
1342                 tscale = *scale;
1343
1344         /* Perform early output if no stabilization is used. */
1345         if ((stab->flag & TRACKING_2D_STABILIZATION) == 0) {
1346                 if (translation)
1347                         zero_v2(translation);
1348
1349                 if (scale)
1350                         *scale = 1.0f;
1351
1352                 if (angle)
1353                         *angle = 0.0f;
1354
1355                 return ibuf;
1356         }
1357
1358         /* Allocate frame for stabilization result. */
1359         ibuf_flags = 0;
1360         if (ibuf->rect)
1361                 ibuf_flags |= IB_rect;
1362         if (ibuf->rect_float)
1363                 ibuf_flags |= IB_rectfloat;
1364
1365         tmpibuf = IMB_allocImBuf(ibuf->x, ibuf->y, ibuf->planes, ibuf_flags);
1366
1367         /* Calculate stabilization matrix. */
1368         BKE_tracking_stabilization_data_get(clip, framenr, width, height, tloc, &tscale, &tangle);
1369         BKE_tracking_stabilization_data_to_mat4(ibuf->x, ibuf->y, pixel_aspect, tloc, tscale, tangle, mat);
1370
1371         /* The following code visits each nominal target grid position
1372          * and picks interpolated data "backwards" from the source.
1373          * Thus we need to apply the inverse of the transformation. */
1374         invert_m4(mat);
1375
1376         if (filter == TRACKING_FILTER_NEAREST)
1377                 interpolation = nearest_interpolation;
1378         else if (filter == TRACKING_FILTER_BILINEAR)
1379                 interpolation = bilinear_interpolation;
1380         else if (filter == TRACKING_FILTER_BICUBIC)
1381                 interpolation = bicubic_interpolation;
1382         else
1383                 /* fallback to default interpolation method */
1384                 interpolation = nearest_interpolation;
1385
1386         /* This function is only used for display in the clip editor and
1387          * the sequencer, which would benefit from using threads
1388          * here.
1389          *
1390          * But we need to keep an eye on this if the function is going to be
1391          * used in other cases.
1392          */
1393 #pragma omp parallel for if (tmpibuf->y > 128)
1394         for (j = 0; j < tmpibuf->y; j++) {
1395                 int i;
1396                 for (i = 0; i < tmpibuf->x; i++) {
1397                         float vec[3] = {i, j, 0.0f};
1398
1399                         mul_v3_m4v3(vec, mat, vec);
1400
1401                         interpolation(ibuf, tmpibuf, vec[0], vec[1], i, j);
1402                 }
1403         }
1404
1405         if (tmpibuf->rect_float)
1406                 tmpibuf->userflags |= IB_RECT_INVALID;
1407
1408         if (translation)
1409                 copy_v2_v2(translation, tloc);
1410
1411         if (scale)
1412                 *scale = tscale;
1413
1414         if (angle)
1415                 *angle = tangle;
1416
1417         return tmpibuf;
1418 }
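
/* Minimal usage sketch (illustrative; the buffer ownership handling shown
 * here is an assumption of the caller, not enforced by this function):
 *
 *     float translation[2], scale, angle;
 *     ImBuf *stable_ibuf = BKE_tracking_stabilize_frame(
 *             clip, framenr, ibuf, translation, &scale, &angle);
 *
 *     // ... use stable_ibuf ...
 *
 *     // With stabilization disabled the input buffer itself is returned,
 *     // otherwise a newly allocated buffer which has to be released:
 *     if (stable_ibuf != ibuf) {
 *             IMB_freeImBuf(stable_ibuf);
 *     }
 */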
1419
1420
1421 /* Build a 4x4 transformation matrix based on the given 2D stabilization data.
1422  * mat is a 4x4 matrix in homogeneous coordinates, adapted to the
1423  *     final image buffer size and compensated for pixel aspect ratio,
1424  *     ready for direct OpenGL drawing.
1425  *
1426  * TODO(sergey): The signature of this function should be changed. We actually
1427  *               don't need the dimensions of the image buffer. Instead we
1428  *               should consider providing the pivot point of the rotation as a
1429  *               further stabilization data parameter.
1430  */
1431 void BKE_tracking_stabilization_data_to_mat4(int buffer_width,
1432                                              int buffer_height,
1433                                              float pixel_aspect,
1434                                              float translation[2], float scale, float angle,
1435                                              float mat[4][4])
1436 {
1437         float translation_mat[4][4], rotation_mat[4][4], scale_mat[4][4],
1438               pivot_mat[4][4], inv_pivot_mat[4][4],
1439               aspect_mat[4][4], inv_aspect_mat[4][4];
1440         float scale_vector[3] = {scale, scale, 1.0f};
1441
1442         float pivot[2]; /* XXX: this should be a parameter; it is part of the stabilization data */
1443
1444         /* Use the motion-compensated image center as rotation center.
1445          * This is not 100% correct, but reflects the way the rotation data was
1446          * measured. Actually we'd need a way to find a good pivot, and use that
1447          * both for averaging and for compensation.
1448          */
1449         /* TODO(sergey): pivot shouldn't be calculated here, but rather be
1450          * received as a parameter.
1451          */
1452         pivot[0] = pixel_aspect * buffer_width / 2.0f - translation[0];
1453         pivot[1] = (float)buffer_height        / 2.0f - translation[1];
1454
1455         unit_m4(translation_mat);
1456         unit_m4(rotation_mat);
1457         unit_m4(scale_mat);
1458         unit_m4(aspect_mat);
1459         unit_m4(pivot_mat);
1460         unit_m4(inv_pivot_mat);
1461
1462         /* aspect ratio correction matrix */
1463         aspect_mat[0][0] /= pixel_aspect;
1464         invert_m4_m4(inv_aspect_mat, aspect_mat);
1465
1466         add_v2_v2(pivot_mat[3],     pivot);
1467         sub_v2_v2(inv_pivot_mat[3], pivot);
1468
1469         size_to_mat4(scale_mat, scale_vector);       /* scale matrix */
1470         add_v2_v2(translation_mat[3], translation);  /* translation matrix */
1471         rotate_m4(rotation_mat, 'Z', angle);         /* rotation matrix */
1472
1473         /* compose transformation matrix */
1474         mul_m4_series(mat, aspect_mat, translation_mat,
1475                            pivot_mat, scale_mat, rotation_mat, inv_pivot_mat,
1476                            inv_aspect_mat);
1477 }
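
/* Reading of the composition above (illustrative): with mul_m4_series() the
 * rightmost factor is the first one applied to an image position, i.e.
 *
 *     mat = aspect * translation * pivot * scale * rotation * pivot^-1 * aspect^-1
 *
 * which means: map into aspect-corrected coordinates, move the pivot to the
 * origin, rotate and scale there, move the pivot back, apply the compensating
 * translation, and finally map back to pixel coordinates.
 */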