580fd260e15c63cf6698ea56af403cfea7f0b427
[blender.git] / extern / ceres / internal / ceres / gradient_checking_cost_function.cc
1 // Ceres Solver - A fast non-linear least squares minimizer
2 // Copyright 2015 Google Inc. All rights reserved.
3 // http://ceres-solver.org/
4 //
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are met:
7 //
8 // * Redistributions of source code must retain the above copyright notice,
9 //   this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above copyright notice,
11 //   this list of conditions and the following disclaimer in the documentation
12 //   and/or other materials provided with the distribution.
13 // * Neither the name of Google Inc. nor the names of its contributors may be
14 //   used to endorse or promote products derived from this software without
15 //   specific prior written permission.
16 //
17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 // POSSIBILITY OF SUCH DAMAGE.
28 //
29 // Author: keir@google.com (Keir Mierle)
30
31 #include "ceres/gradient_checking_cost_function.h"
32
33 #include <algorithm>
34 #include <cmath>
35 #include <numeric>
36 #include <string>
37 #include <vector>
38
39 #include "ceres/cost_function.h"
40 #include "ceres/internal/eigen.h"
41 #include "ceres/internal/scoped_ptr.h"
42 #include "ceres/parameter_block.h"
43 #include "ceres/problem.h"
44 #include "ceres/problem_impl.h"
45 #include "ceres/program.h"
46 #include "ceres/residual_block.h"
47 #include "ceres/dynamic_numeric_diff_cost_function.h"
48 #include "ceres/stringprintf.h"
49 #include "ceres/types.h"
50 #include "glog/logging.h"
51
52 namespace ceres {
53 namespace internal {
54
55 using std::abs;
56 using std::max;
57 using std::string;
58 using std::vector;
59
60 namespace {
61
// Returns true if x and y agree to within relative_precision, i.e. their
// absolute relative difference is below it; false otherwise. When either
// operand is exactly zero the absolute difference is used instead, since a
// relative difference is meaningless there. The computed relative and
// absolute differences are written through relative_error/absolute_error
// when those pointers are non-NULL.
bool IsClose(double x, double y, double relative_precision,
             double *relative_error,
             double *absolute_error) {
  // Scratch storage so the code below can always write through the
  // pointers, whether or not the caller asked for the values.
  double relative_scratch;
  double absolute_scratch;
  if (relative_error == NULL) {
    relative_error = &relative_scratch;
  }
  if (absolute_error == NULL) {
    absolute_error = &absolute_scratch;
  }

  *absolute_error = std::abs(x - y);
  if (x == 0 || y == 0) {
    // A zero operand makes the relative difference meaningless; fall back
    // to the absolute difference.
    *relative_error = *absolute_error;
  } else {
    *relative_error = *absolute_error / std::max(std::abs(x), std::abs(y));
  }
  return std::abs(*relative_error) < std::abs(relative_precision);
}
85
// A CostFunction that wraps another CostFunction and, whenever jacobians
// are requested, additionally evaluates them by central-difference numeric
// differentiation, logging (via glog) every jacobian block that contains
// an entry whose relative error versus the user-supplied derivative
// exceeds relative_precision. The values returned to the caller are
// always those produced by the wrapped CostFunction; the numeric
// jacobians are used only for comparison.
class GradientCheckingCostFunction : public CostFunction {
 public:
  // Does not take ownership of |function|; it must outlive this object.
  // |extra_info| is prepended to the warning output to identify the
  // offending residual block.
  GradientCheckingCostFunction(const CostFunction* function,
                               const NumericDiffOptions& options,
                               double relative_precision,
                               const string& extra_info)
      : function_(function),
        relative_precision_(relative_precision),
        extra_info_(extra_info) {
    // Build a central-difference numeric-diff wrapper with exactly the
    // same parameter block structure as the wrapped function.
    DynamicNumericDiffCostFunction<CostFunction, CENTRAL>*
        finite_diff_cost_function =
        new DynamicNumericDiffCostFunction<CostFunction, CENTRAL>(
            function,
            DO_NOT_TAKE_OWNERSHIP,
            options);

    const vector<int32>& parameter_block_sizes =
        function->parameter_block_sizes();
    for (int i = 0; i < parameter_block_sizes.size(); ++i) {
      finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]);
    }
    // Mirror the wrapped function's sizes so this object is a drop-in
    // replacement for it.
    *mutable_parameter_block_sizes() = parameter_block_sizes;
    set_num_residuals(function->num_residuals());
    finite_diff_cost_function->SetNumResiduals(num_residuals());
    finite_diff_cost_function_.reset(finite_diff_cost_function);
  }

  virtual ~GradientCheckingCostFunction() { }

  // Evaluates the wrapped CostFunction; when |jacobians| is non-NULL,
  // also evaluates the jacobians numerically and logs a warning listing
  // every jacobian block when any entry disagrees beyond
  // relative_precision_. Returns false only if the wrapped function's
  // evaluation fails.
  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    if (!jacobians) {
      // Nothing to check in this case; just forward.
      return function_->Evaluate(parameters, residuals, NULL);
    }

    int num_residuals = function_->num_residuals();

    // Make space for the jacobians of the two methods.
    const vector<int32>& block_sizes = function_->parameter_block_sizes();
    vector<Matrix> term_jacobians(block_sizes.size());
    vector<Matrix> finite_difference_jacobians(block_sizes.size());
    // Per-block data pointers into the Eigen matrices above, in the
    // double** layout that CostFunction::Evaluate expects.
    vector<double*> term_jacobian_pointers(block_sizes.size());
    vector<double*> finite_difference_jacobian_pointers(block_sizes.size());
    for (int i = 0; i < block_sizes.size(); i++) {
      term_jacobians[i].resize(num_residuals, block_sizes[i]);
      term_jacobian_pointers[i] = term_jacobians[i].data();
      finite_difference_jacobians[i].resize(num_residuals, block_sizes[i]);
      finite_difference_jacobian_pointers[i] =
          finite_difference_jacobians[i].data();
    }

    // Evaluate the derivative using the user supplied code.
    if (!function_->Evaluate(parameters,
                             residuals,
                             &term_jacobian_pointers[0])) {
      LOG(WARNING) << "Function evaluation failed.";
      return false;
    }

    // Evaluate the derivative using numeric derivatives.
    // NOTE(review): the return value of this call is ignored; if numeric
    // differentiation fails, the comparison below runs against whatever
    // is left in finite_difference_jacobians. TODO confirm whether the
    // failure should be reported instead.
    finite_diff_cost_function_->Evaluate(
        parameters,
        residuals,
        &finite_difference_jacobian_pointers[0]);

    // See if any elements have relative error larger than the threshold.
    int num_bad_jacobian_components = 0;
    double worst_relative_error = 0;

    // Accumulate the error message for all the jacobians, since it won't get
    // output if there are no bad jacobian components.
    string m;
    for (int k = 0; k < block_sizes.size(); k++) {
      // Copy the original jacobian blocks into the jacobians array.
      // (The caller may pass NULL for blocks it does not want.)
      if (jacobians[k] != NULL) {
        MatrixRef(jacobians[k],
                  term_jacobians[k].rows(),
                  term_jacobians[k].cols()) = term_jacobians[k];
      }

      StringAppendF(&m,
                    "========== "
                    "Jacobian for " "block %d: (%ld by %ld)) "
                    "==========\n",
                    k,
                    static_cast<long>(term_jacobians[k].rows()),
                    static_cast<long>(term_jacobians[k].cols()));
      // The funny spacing creates appropriately aligned column headers.
      m += " block  row  col        user dx/dy    num diff dx/dy         "
           "abs error    relative error         parameter          residual\n";

      for (int i = 0; i < term_jacobians[k].rows(); i++) {
        for (int j = 0; j < term_jacobians[k].cols(); j++) {
          double term_jacobian = term_jacobians[k](i, j);
          double finite_jacobian = finite_difference_jacobians[k](i, j);
          double relative_error, absolute_error;
          bool bad_jacobian_entry =
              !IsClose(term_jacobian,
                       finite_jacobian,
                       relative_precision_,
                       &relative_error,
                       &absolute_error);
          worst_relative_error = max(worst_relative_error, relative_error);

          // Every entry is appended to the message; the whole table is
          // only logged if at least one entry was bad.
          StringAppendF(&m, "%6d %4d %4d %17g %17g %17g %17g %17g %17g",
                        k, i, j,
                        term_jacobian, finite_jacobian,
                        absolute_error, relative_error,
                        parameters[k][j],
                        residuals[i]);

          if (bad_jacobian_entry) {
            num_bad_jacobian_components++;
            StringAppendF(
                &m, " ------ (%d,%d,%d) Relative error worse than %g",
                k, i, j, relative_precision_);
          }
          m += "\n";
        }
      }
    }

    // Since there were some bad errors, dump comprehensive debug info.
    if (num_bad_jacobian_components) {
      string header = StringPrintf("Detected %d bad jacobian component(s). "
                                   "Worst relative error was %g.\n",
                                   num_bad_jacobian_components,
                                   worst_relative_error);
      if (!extra_info_.empty()) {
        header += "Extra info for this residual: " + extra_info_ + "\n";
      }
      LOG(WARNING) << "\n" << header << m;
    }
    // A gradient mismatch is only warned about, never treated as a
    // failed evaluation.
    return true;
  }

 private:
  const CostFunction* function_;  // Not owned.
  internal::scoped_ptr<CostFunction> finite_diff_cost_function_;
  double relative_precision_;
  string extra_info_;  // Identifies the residual block in log output.
};
230
231 }  // namespace
232
233 CostFunction *CreateGradientCheckingCostFunction(
234     const CostFunction *cost_function,
235     double relative_step_size,
236     double relative_precision,
237     const string& extra_info) {
238   NumericDiffOptions numeric_diff_options;
239   numeric_diff_options.relative_step_size = relative_step_size;
240
241   return new GradientCheckingCostFunction(cost_function,
242                                           numeric_diff_options,
243                                           relative_precision,
244                                           extra_info);
245 }
246
247 ProblemImpl* CreateGradientCheckingProblemImpl(ProblemImpl* problem_impl,
248                                                double relative_step_size,
249                                                double relative_precision) {
250   // We create new CostFunctions by wrapping the original CostFunction
251   // in a gradient checking CostFunction. So its okay for the
252   // ProblemImpl to take ownership of it and destroy it. The
253   // LossFunctions and LocalParameterizations are reused and since
254   // they are owned by problem_impl, gradient_checking_problem_impl
255   // should not take ownership of it.
256   Problem::Options gradient_checking_problem_options;
257   gradient_checking_problem_options.cost_function_ownership = TAKE_OWNERSHIP;
258   gradient_checking_problem_options.loss_function_ownership =
259       DO_NOT_TAKE_OWNERSHIP;
260   gradient_checking_problem_options.local_parameterization_ownership =
261       DO_NOT_TAKE_OWNERSHIP;
262
263   ProblemImpl* gradient_checking_problem_impl = new ProblemImpl(
264       gradient_checking_problem_options);
265
266   Program* program = problem_impl->mutable_program();
267
268   // For every ParameterBlock in problem_impl, create a new parameter
269   // block with the same local parameterization and constancy.
270   const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
271   for (int i = 0; i < parameter_blocks.size(); ++i) {
272     ParameterBlock* parameter_block = parameter_blocks[i];
273     gradient_checking_problem_impl->AddParameterBlock(
274         parameter_block->mutable_user_state(),
275         parameter_block->Size(),
276         parameter_block->mutable_local_parameterization());
277
278     if (parameter_block->IsConstant()) {
279       gradient_checking_problem_impl->SetParameterBlockConstant(
280           parameter_block->mutable_user_state());
281     }
282   }
283
284   // For every ResidualBlock in problem_impl, create a new
285   // ResidualBlock by wrapping its CostFunction inside a
286   // GradientCheckingCostFunction.
287   const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
288   for (int i = 0; i < residual_blocks.size(); ++i) {
289     ResidualBlock* residual_block = residual_blocks[i];
290
291     // Build a human readable string which identifies the
292     // ResidualBlock. This is used by the GradientCheckingCostFunction
293     // when logging debugging information.
294     string extra_info = StringPrintf(
295         "Residual block id %d; depends on parameters [", i);
296     vector<double*> parameter_blocks;
297     for (int j = 0; j < residual_block->NumParameterBlocks(); ++j) {
298       ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
299       parameter_blocks.push_back(parameter_block->mutable_user_state());
300       StringAppendF(&extra_info, "%p", parameter_block->mutable_user_state());
301       extra_info += (j < residual_block->NumParameterBlocks() - 1) ? ", " : "]";
302     }
303
304     // Wrap the original CostFunction in a GradientCheckingCostFunction.
305     CostFunction* gradient_checking_cost_function =
306         CreateGradientCheckingCostFunction(residual_block->cost_function(),
307                                            relative_step_size,
308                                            relative_precision,
309                                            extra_info);
310
311     // The const_cast is necessary because
312     // ProblemImpl::AddResidualBlock can potentially take ownership of
313     // the LossFunction, but in this case we are guaranteed that this
314     // will not be the case, so this const_cast is harmless.
315     gradient_checking_problem_impl->AddResidualBlock(
316         gradient_checking_cost_function,
317         const_cast<LossFunction*>(residual_block->loss_function()),
318         parameter_blocks);
319   }
320
321   // Normally, when a problem is given to the solver, we guarantee
322   // that the state pointers for each parameter block point to the
323   // user provided data. Since we are creating this new problem from a
324   // problem given to us at an arbitrary stage of the solve, we cannot
325   // depend on this being the case, so we explicitly call
326   // SetParameterBlockStatePtrsToUserStatePtrs to ensure that this is
327   // the case.
328   gradient_checking_problem_impl
329       ->mutable_program()
330       ->SetParameterBlockStatePtrsToUserStatePtrs();
331
332   return gradient_checking_problem_impl;
333 }
334
335
336 }  // namespace internal
337 }  // namespace ceres