exprinterpret_cppad.cpp
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and library */
4 /* SCIP --- Solving Constraint Integer Programs */
5 /* */
6 /* Copyright (C) 2002-2016 Konrad-Zuse-Zentrum */
7 /* fuer Informationstechnik Berlin */
8 /* */
9 /* SCIP is distributed under the terms of the ZIB Academic License. */
10 /* */
11 /* You should have received a copy of the ZIB Academic License */
12 /* along with SCIP; see the file COPYING. If not email to scip@zib.de. */
13 /* */
14 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
15 
16 /**@file exprinterpret_cppad.cpp
17  * @brief methods to interpret (evaluate) an expression tree "fast" using CppAD
18  * @ingroup EXPRINTS
19  * @author Stefan Vigerske
20  */
21 
22 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
23 
24 #include "scip/def.h"
25 #include "blockmemshell/memory.h"
26 #include "nlpi/pub_expr.h"
27 #include "nlpi/exprinterpret.h"
28 
29 #include <cmath>
30 #include <vector>
31 using std::vector;
32 
33 /* Turn off lint warning "747: Significant prototype coercion" and "732: Loss of sign".
34  * The first warning is generated for expressions like t[0], where t is a vector, since 0 is an integer constant, but a
35  * size_t is expected (usually long unsigned). The second is generated for expressions like t[n], where n is an
36  * integer. Both code pieces are likely to be correct. It seems to be impossible to inhibit these messages for
37  * vector<*>::operator[] only. */
38 /*lint --e{747,732}*/
39 
40 /* Turn off lint info "1702 operator '...' is both an ordinary function 'CppAD::operator...' and a member function 'CppAD::SCIPInterval::operator...'".
41  * However, the functions have different signatures (the CppAD one working on double, the SCIPInterval member
42  * function working on SCIPIntervals).
43  */
44 /*lint --e{1702}*/
45 
46 /* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
47  * via CppAD's user-atomic function feature.
48  * Our customized implementation should give better results (tighter intervals) for the interval data type.
49  */
50 /* #define NO_CPPAD_USER_ATOMIC */
51 
52 /** sign of a value (-1 or +1)
53  *
54  * 0.0 has sign +1
55  */
56 #define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
57 
58 /* in order to use intervals as operands in CppAD,
59  * we need to include intervalarithext.h very early and require the interval operations to be in the CppAD namespace */
60 #define SCIPInterval_NAMESPACE CppAD
61 #include "nlpi/intervalarithext.h"
62 
63 SCIP_Real CppAD::SCIPInterval::infinity = SCIP_DEFAULT_INFINITY;
64 using CppAD::SCIPInterval;
65 
66 /* CppAD needs to know a fixed upper bound on the number of threads at compile time.
67  * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult-to-debug errors.
68  */
69 #ifndef CPPAD_MAX_NUM_THREADS
70 #ifndef NPARASCIP
71 #define CPPAD_MAX_NUM_THREADS 64
72 #else
73 #define CPPAD_MAX_NUM_THREADS 1
74 #endif
75 #endif
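/* Note (illustration only, not part of the original file): since the definition above is guarded by
 * #ifndef CPPAD_MAX_NUM_THREADS, the bound can also be raised when compiling, e.g. by passing
 *   -DCPPAD_MAX_NUM_THREADS=128
 * to the compiler, as long as the chosen value is at least the number of threads that will ever use CppAD.
 */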
76 
77 #include <cppad/cppad.hpp>
78 #include <cppad/error_handler.hpp>
79 
80 /* CppAD is not thread-safe by itself, but uses some static data structures.
81  * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
82  * This allocator requires knowing the number of threads and a thread number for each thread.
83  * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
84  */
85 #ifndef NPARASCIP
86 #include <pthread.h>
87 
88 /* workaround error message regarding missing implementation of tanh during initialization of static variables (see cppad/local/erf.hpp) */
89 namespace CppAD
90 {
91 template <> SCIPInterval erf_template(
92  const SCIPInterval &x
93  )
94 {
95  CPPAD_ASSERT_FIRST_CALL_NOT_PARALLEL;
96  return SCIPInterval();
97 }
98 template <> AD<SCIPInterval> erf_template(
99  const AD<SCIPInterval> &x
100  )
101 {
102  CPPAD_ASSERT_FIRST_CALL_NOT_PARALLEL;
103  return AD<SCIPInterval>();
104 }
105 }
106 
107 /** mutex for locking in pthread case */
108 static pthread_mutex_t cppadmutex = PTHREAD_MUTEX_INITIALIZER;
109 
110 /** key for accessing thread specific information */
111 static pthread_key_t thread_specific_key;
112 
113 /** currently registered number of threads */
114 static size_t ncurthreads = 0;
115 
116 /** CppAD callback function that indicates whether we are running in parallel mode */
117 static
118 bool in_parallel(void)
119 {
120  return ncurthreads > 1;
121 }
122 
123 /** CppAD callback function that returns the number of the current thread
124  *
125  * assigns a new number to the thread if new
126  */
127 static
128 size_t thread_num(void)
129 {
130  size_t threadnum;
131  void* specific;
132 
133  specific = pthread_getspecific(thread_specific_key);
134 
135  /* if no data for this thread yet, then assign a new thread number to the current thread
136  * we store the thread number incremented by one, to distinguish the absence of data (=0) from existing data
137  */
138  if( specific == NULL )
139  {
140  pthread_mutex_lock(&cppadmutex);
141 
142  SCIPdebugMessage("Assigning thread number %lu to thread %p.\n", (long unsigned int)ncurthreads, (void*)pthread_self());
143 
144  pthread_setspecific(thread_specific_key, (void*)(ncurthreads + 1));
145 
146  threadnum = ncurthreads;
147 
148  ++ncurthreads;
149 
150  pthread_mutex_unlock(&cppadmutex);
151 
152  assert(pthread_getspecific(thread_specific_key) != NULL);
153  assert((size_t)pthread_getspecific(thread_specific_key) == threadnum + 1);
154  }
155  else
156  {
157  threadnum = (size_t)(specific) - 1;
158  }
159 
160  assert(threadnum < ncurthreads);
161 
162  return threadnum;
163 }
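/* Illustration (not from the original file) of the encoding used in thread_num() above:
 * the first thread to call it finds specific == NULL, stores (void*)(0 + 1) under
 * thread_specific_key, and returns thread number 0; on its next call it finds specific == (void*)1
 * and returns (size_t)1 - 1 = 0 again, while a second thread would be assigned number 1 and store
 * (void*)2.  Storing threadnum + 1 is what distinguishes "no data yet" (NULL) from "thread number 0".
 */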
164 
165 /** sets up CppAD's datastructures for running in multithreading mode
166  *
167  * It must be called once before multithreading is started.
168  */
169 static
170 char init_parallel(void)
171 {
172  pthread_key_create(&thread_specific_key, NULL);
173 
174  CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
175  CppAD::parallel_ad<double>();
176  CppAD::parallel_ad<SCIPInterval>();
177 
178  return 0;
179 }
180 
181 /** a dummy variable that is initialized to the result of init_parallel()
182  *
183  * The purpose is to make sure that init_parallel() is called before any multithreading is started.
184  */
185 static char init_parallel_return = init_parallel();
186 
187 #endif // NPARASCIP
188 
189 /** definition of CondExpOp for SCIPInterval (required by CppAD) */
190 inline
191 SCIPInterval CondExpOp(
192  enum CppAD::CompareOp cop,
193  const SCIPInterval& left,
194  const SCIPInterval& right,
195  const SCIPInterval& trueCase,
196  const SCIPInterval& falseCase)
197 { /*lint --e{715}*/
198  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
199  "SCIPInterval CondExpOp(...)",
200  "Error: cannot use CondExp with an interval type"
201  );
202 
203  return SCIPInterval();
204 }
205 
206 /** another function that returns whether two intervals are the same (required by CppAD) */
207 inline
208 bool EqualOpSeq(
209  const SCIPInterval& x, /**< first operand */
210  const SCIPInterval& y /**< second operand */
211  )
212 {
213  return x == y;
214 }
215 
216 /** another function required by CppAD */
217 inline
218 bool IdenticalPar(
219  const SCIPInterval& x /**< operand */
220  )
221 { /*lint --e{715}*/
222  return true;
223 }
224 
225 /** returns whether the interval equals [0,0] */
226 inline
227 bool IdenticalZero(
228  const SCIPInterval& x /**< operand */
229  )
230 {
231  return (x == 0.0);
232 }
233 
234 /** returns whether the interval equals [1,1] */
235 inline
236 bool IdenticalOne(
237  const SCIPInterval& x /**< operand */
238  )
239 {
240  return (x == 1.0);
241 }
242 
243 /** yet another function that checks whether two intervals are equal */
244 inline
245 bool IdenticalEqualPar(
246  const SCIPInterval& x, /**< first operand */
247  const SCIPInterval& y /**< second operand */
248  )
249 {
250  return (x == y);
251 }
252 
253 /** greater than zero not defined for intervals */
254 inline
255 bool GreaterThanZero(
256  const SCIPInterval& x /**< operand */
257  )
258 { /*lint --e{715}*/
259  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
260  "GreaterThanZero(x)",
261  "Error: cannot use GreaterThanZero with interval"
262  );
263 
264  return false;
265 }
266 
267 /** greater than or equal zero not defined for intervals */
268 inline
269 bool GreaterThanOrZero(
270  const SCIPInterval& x /**< operand */
271  )
272 { /*lint --e{715}*/
273  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__ ,
274  "GreaterThanOrZero(x)",
275  "Error: cannot use GreaterThanOrZero with interval"
276  );
277 
278  return false;
279 }
280 
281 /** less than not defined for intervals */
282 inline
283 bool LessThanZero(
284  const SCIPInterval& x /**< operand */
285  )
286 { /*lint --e{715}*/
287  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
288  "LessThanZero(x)",
289  "Error: cannot use LessThanZero with interval"
290  );
291 
292  return false;
293 }
294 
295 /** less than or equal not defined for intervals */
296 inline
297 bool LessThanOrZero(
298  const SCIPInterval& x /**< operand */
299  )
300 { /*lint --e{715}*/
301  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
302  "LessThanOrZero(x)",
303  "Error: cannot use LessThanOrZero with interval"
304  );
305 
306  return false;
307 }
308 
309 /** conversion to integers not defined for intervals */
310 inline
311 int Integer(
312  const SCIPInterval& x /**< operand */
313  )
314 { /*lint --e{715}*/
315  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
316  "Integer(x)",
317  "Error: cannot use Integer with interval"
318  );
319 
320  return 0;
321 }
322 
323 /** printing of an interval (required by CppAD) */
324 inline
325 std::ostream& operator<<(std::ostream& out, const SCIP_INTERVAL& x)
326 {
327  out << '[' << x.inf << ',' << x.sup << ']';
328  return out;
329 }
330 
331 using CppAD::AD;
332 
333 /** expression interpreter */
334 struct SCIP_ExprInt
335 {
336  BMS_BLKMEM* blkmem; /**< block memory data structure */
337 };
338 
339 /** expression specific interpreter data */
340 struct SCIP_ExprIntData
341 {
342 public:
343  /** constructor */
344  SCIP_ExprIntData()
345  : val(0.0), need_retape(true), int_need_retape(true), need_retape_always(false), userevalcapability(SCIP_EXPRINTCAPABILITY_ALL), blkmem(NULL), root(NULL)
346  { }
347 
348  /** destructor */
349  ~SCIP_ExprIntData()
350  { }/*lint --e{1540}*/
351 
352  vector< AD<double> > X; /**< vector of dependent variables */
353  vector< AD<double> > Y; /**< result vector */
354  CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
355 
356  vector<double> x; /**< current values of dependent variables */
357  double val; /**< current function value */
358  bool need_retape; /**< will retaping be required for the next point evaluation? */
359 
360  vector< AD<SCIPInterval> > int_X; /**< interval vector of dependent variables */
361  vector< AD<SCIPInterval> > int_Y; /**< interval result vector */
362  CppAD::ADFun<SCIPInterval> int_f; /**< the function to evaluate on intervals as CppAD object */
363 
364  vector<SCIPInterval> int_x; /**< current interval values of dependent variables */
365  SCIPInterval int_val; /**< current interval function value */
366  bool int_need_retape; /**< will retaping be required for the next interval evaluation? */
367 
368  bool need_retape_always; /**< will retaping be always required? */
369  SCIP_EXPRINTCAPABILITY userevalcapability; /**< (intersection of) capabilities of evaluation routines of user expressions */
370 
371  BMS_BLKMEM* blkmem; /**< block memory used to allocate expression tree */
372  SCIP_EXPR* root; /**< copy of expression tree; @todo we should not need to make a copy */
373 };
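/* Illustration only, not part of SCIP: a minimal sketch of how the double-valued members above
 * (X, Y, f, x) are typically used with CppAD - tape the expression once, then reuse the tape for
 * point evaluations.  The function name, the expression x0*x0, and the guard macro
 * SCIP_EXPRINT_CPPAD_EXAMPLE are made up for this example, so the sketch is never compiled in.
 */
#ifdef SCIP_EXPRINT_CPPAD_EXAMPLE
static double exampleTapeAndEval(double x0)
{
   vector< AD<double> > X(1);      /* independent variables */
   vector< AD<double> > Y(1);      /* dependent (result) variables */

   X[0] = x0;
   CppAD::Independent(X);          /* start taping operations on X */
   Y[0] = X[0] * X[0];             /* record the expression to evaluate */
   CppAD::ADFun<double> f(X, Y);   /* stop taping and build the function object */

   vector<double> xval(1, x0);
   return f.Forward(0, xval)[0];   /* zero-order forward sweep = function value */
}
#endif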
374 
375 #ifndef NO_CPPAD_USER_ATOMIC
376 
377 /** computes sparsity of jacobian for a univariate function during a forward sweep
378  *
379  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
380  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
381  */
382 static
383 bool univariate_for_sparse_jac(
384  size_t q, /**< number of columns in R */
385  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
386  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
387  )
388 {
389  assert(r.size() == q);
390  assert(s.size() == q);
391 
392  s = r;
393 
394  return true;
395 }
396 
397 /** Computes sparsity of jacobian during a reverse sweep
398  *
399  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
400  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
401  */
402 static
403 bool univariate_rev_sparse_jac(
404  size_t q, /**< number of rows in R */
405  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
406  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
407  )
408 {
409  assert(r.size() == q);
410  assert(s.size() == q);
411 
412  s = r;
413 
414  return true;
415 }
416 
417 /** computes sparsity of hessian during a reverse sweep
418  *
419  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
420  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
421  */
422 static
423 bool univariate_rev_sparse_hes(
424  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
425  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
426  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
427  size_t q, /**< number of columns in R, U, and V */
428  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
429  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
430  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
431  )
432 { /*lint --e{439,715}*/ /* @todo take vx into account */
433  assert(r.size() == q);
434  assert(s.size() == 1);
435  assert(t.size() == 1);
436  assert(u.size() == q);
437  assert(v.size() == q);
438 
439  // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
440  t[0] = s[0];
441 
442  // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
443  // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
444  v = u;
445  if( s[0] )
446  for( size_t j = 0; j < q; ++j )
447  if( r[j] )
448  v[j] = true;
449 
450  return true;
451 }
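/* Worked example (illustration only): for q = 3, s = {true}, r = {false, true, false} and
 * u = {false, false, true}, the function above returns t = {true} and v = {false, true, true}:
 * v starts as a copy of u, and since s[0] is true, every column j with r[j] true is added as well.
 */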
452 
453 
454 /** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
455  *
456  * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
457  * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetic.
458  *
459  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
460  */
461 template<class Type>
462 class atomic_posintpower : public CppAD::atomic_base<Type>
463 {
464 public:
465  atomic_posintpower()
466  : CppAD::atomic_base<Type>("posintpower"),
467  exponent(0)
468  {
469  /* indicate that we want to use bool-based sparsity pattern */
470  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
471  }
472 
473 private:
474  /** exponent value for next call to forward or reverse */
475  int exponent;
476 
477  /** stores exponent value corresponding to next call to forward or reverse
478  *
479  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
480  */
481  virtual void set_id(size_t id)
482  {
483  exponent = (int) id;
484  }
485 
486  /** forward sweep of positive integer power
487  *
488  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
489  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
490  * in the taylor expansion of f(x) = x^p.
491  * Thus, y = x^p
492  * = tx[0]^p,
493  * y' = p * x^(p-1) * x'
494  * = p * tx[0]^(p-1) * tx[1],
495  * y'' = 1/2 * p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
496  * = 1/2 * p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
497  */
498  bool forward(
499  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
500  size_t p, /**< highest order Taylor coefficient that we are evaluating */
501  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
502  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
503  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
504  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
505  )
506  {
507  assert(exponent > 1);
508  assert(tx.size() >= p+1);
509  assert(ty.size() >= p+1);
510  assert(q <= p);
511 
512  if( vx.size() > 0 )
513  {
514  assert(vx.size() == 1);
515  assert(vy.size() == 1);
516  assert(p == 0);
517 
518  vy[0] = vx[0];
519  }
520 
521  if( q == 0 /* q <= 0 && 0 <= p */ )
522  {
523  ty[0] = CppAD::pow(tx[0], exponent);
524  }
525 
526  if( q <= 1 && 1 <= p )
527  {
528  ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
529  ty[1] *= double(exponent);
530  }
531 
532  if( q <= 2 && 2 <= p )
533  {
534  if( exponent > 2 )
535  {
536  // ty[2] = 1/2 * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
537  ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
538  ty[2] *= (exponent-1) / 2.0;
539  ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
540  ty[2] *= exponent;
541  }
542  else
543  {
544  assert(exponent == 2);
545  // ty[2] = 1/2 * exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
546  ty[2] = tx[1] * tx[1] + 2.0 * tx[0] * tx[2];
547  }
548  }
549 
550  /* higher order derivatives not implemented */
551  if( p > 2 )
552  return false;
553 
554  return true;
555  }
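   /* Worked example (illustration only): for exponent = 3 and tx = (2, 1, 0), the forward sweep
    * above computes ty[0] = 2^3 = 8, ty[1] = 3 * 2^2 * 1 = 12, and
    * ty[2] = 1/2 * 3 * 2 * 2 * 1^2 + 3 * 2^2 * 0 = 6, i.e., the Taylor coefficients of
    * f(x(t)) = (2 + t)^3 = 8 + 12 t + 6 t^2 + t^3 at t = 0.
    */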
556 
557  /** reverse sweep of positive integer power
558  *
559  * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
560  * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
561  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
562  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
563  * That is, we have to compute
564  *\f$
565  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
566  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
567  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
568  * \f$
569  *
570  * For k = 0, this means
571  *\f$
572  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
573  * = py[0] * (\partial x^p / \partial x)
574  * = py[0] * p * tx[0]^(p-1)
575  *\f$
576  *
577  * For k = 1, this means
578  * \f$
579  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
580  * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
581  * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
582  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
583  * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
584  * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
585  * \f$
586  */
587  bool reverse(
588  size_t p, /**< highest order Taylor coefficient that we are evaluating */
589  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
590  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
591  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
592  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
593  )
594  { /*lint --e{715}*/
595  assert(exponent > 1);
596  assert(px.size() >= p+1);
597  assert(py.size() >= p+1);
598  assert(tx.size() >= p+1);
599 
600  switch( p )
601  {
602  case 0:
603  // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
604  px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
605  px[0] *= exponent;
606  break;
607 
608  case 1:
609  // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
610  px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
611  px[0] *= exponent-1;
612  px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
613  px[0] *= exponent;
614  // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
615  px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
616  px[1] *= exponent;
617  break;
618 
619  default:
620  return false;
621  }
622 
623  return true;
624  }
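   /* Worked example (illustration only): for exponent = 3, tx = (2) and py = (1), case 0 above
    * gives px[0] = 1 * 3 * 2^2 = 12, which is exactly d(x^3)/dx at x = 2.
    */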
625 
626  using CppAD::atomic_base<Type>::for_sparse_jac;
627 
628  /** computes sparsity of jacobian during a forward sweep
629  *
630  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
631  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
632  */
633  bool for_sparse_jac(
634  size_t q, /**< number of columns in R */
635  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
636  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
637  )
638  {
639  return univariate_for_sparse_jac(q, r, s);
640  }
641 
642  using CppAD::atomic_base<Type>::rev_sparse_jac;
643 
644  /** computes sparsity of jacobian during a reverse sweep
645  *
646  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
647  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
648  */
649  bool rev_sparse_jac(
650  size_t q, /**< number of rows in R */
651  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
652  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
653  )
654  {
655  return univariate_rev_sparse_jac(q, r, s);
656  }
657 
658  using CppAD::atomic_base<Type>::rev_sparse_hes;
659 
660  /** computes sparsity of hessian during a reverse sweep
661  *
662  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
663  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
664  */
665  bool rev_sparse_hes(
666  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
667  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
668  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
669  size_t q, /**< number of columns in R, U, and V */
670  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
671  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
672  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
673  )
674  {
675  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
676  }
677 };
678 
679 /** power function with natural exponents */
680 template<class Type>
681 static
682 void posintpower(
683  const vector<Type>& in, /**< vector whose first element is the base */
684  vector<Type>& out, /**< vector to store the result in its first element */
685  size_t exponent /**< exponent */
686  )
687 {
688  static atomic_posintpower<typename Type::value_type> pip;
689  pip(in, out, exponent);
690 }
691 
692 #else
693 
694 /** power function with natural exponents */
695 template<class Type>
696 void posintpower(
697  const vector<Type>& in, /**< vector whose first element is the base */
698  vector<Type>& out, /**< vector to store the result in its first element */
699  size_t exponent /**< exponent */
700  )
701 {
702  out[0] = pow(in[0], (int)exponent);
703 }
704 
705 #endif
706 
707 
708 #ifndef NO_CPPAD_USER_ATOMIC
709 
710 /** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
711  *
712  * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
713  * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows us to provide
714  * a continuously differentiable function.
715  *
716  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
717  */
718 template<class Type>
719 class atomic_signpower : public CppAD::atomic_base<Type>
720 {
721 public:
722  atomic_signpower()
723  : CppAD::atomic_base<Type>("signpower"),
724  exponent(0.0)
725  {
726  /* indicate that we want to use bool-based sparsity pattern */
727  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
728  }
729 
730 private:
731  /** exponent for use in next call to forward or reverse */
732  SCIP_Real exponent;
733 
734  /** stores exponent corresponding to next call to forward or reverse
735  *
736  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
737  */
738  virtual void set_id(size_t id)
739  {
740  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
741  }
742 
743  /** forward sweep of signpower
744  *
745  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
746  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
747  * in the taylor expansion of f(x) = sign(x)abs(x)^p.
748  * Thus, y = sign(x)abs(x)^p
749  * = sign(tx[0])abs(tx[0])^p,
750  * y' = p * abs(x)^(p-1) * x'
751  * = p * abs(tx[0])^(p-1) * tx[1],
752  * y'' = 1/2 * p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
753  * = 1/2 * p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
754  */
755  bool forward(
756  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
757  size_t p, /**< highest order Taylor coefficient that we are evaluating */
758  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
759  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
760  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
761  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
762  )
763  {
764  assert(exponent > 0.0);
765  assert(tx.size() >= p+1);
766  assert(ty.size() >= p+1);
767  assert(q <= p);
768 
769  if( vx.size() > 0 )
770  {
771  assert(vx.size() == 1);
772  assert(vy.size() == 1);
773  assert(p == 0);
774 
775  vy[0] = vx[0];
776  }
777 
778  if( q == 0 /* q <= 0 && 0 <= p */ )
779  {
780  ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
781  }
782 
783  if( q <= 1 && 1 <= p )
784  {
785  ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
786  ty[1] *= exponent;
787  }
788 
789  if( q <= 2 && 2 <= p )
790  {
791  if( exponent != 2.0 )
792  {
793  ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
794  ty[2] *= (exponent - 1.0) / 2.0;
795  ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
796  ty[2] *= exponent;
797  }
798  else
799  {
800  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
801  ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
802  ty[2] += 2.0 * REALABS(tx[0]) * tx[2];
803  }
804  }
805 
806  /* higher order derivatives not implemented */
807  if( p > 2 )
808  return false;
809 
810  return true;
811  }
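   /* Worked example (illustration only): for exponent = 3 and tx = (-2, 1, 0), the sweep above
    * gives ty[0] = sign(-2) * 2^3 = -8, ty[1] = 3 * 2^2 * 1 = 12 and
    * ty[2] = 1/2 * 3 * 2 * sign(-2) * 2 * 1^2 = -6, matching f(x) = sign(x)|x|^3 = x^3 at x = -2
    * (function value -8, first derivative 12, half the second derivative -6).
    */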
812 
813  /** reverse sweep of signpower
814  *
815  * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
816  * y(x) = [ f(x), f'(x), f''(x), ... ].
817  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
818  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
819  * That is, we have to compute
820  *\f$
821  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
822  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
823  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
824  *\f$
825  *
826  * For k = 0, this means
827  *\f$
828  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
829  * = py[0] * (\partial f(x) / \partial x)
830  * = py[0] * p * abs(tx[0])^(p-1)
831  * \f$
832  *
833  * For k = 1, this means
834  *\f$
835  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
836  * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
837  * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
838  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
839  * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
840  * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
841  * \f$
842  */
843  bool reverse(
844  size_t p, /**< highest order Taylor coefficient that we are evaluating */
845  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
846  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
847  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
848  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
849  )
850  { /*lint --e{715}*/
851  assert(exponent > 1);
852  assert(px.size() >= p+1);
853  assert(py.size() >= p+1);
854  assert(tx.size() >= p+1);
855 
856  switch( p )
857  {
858  case 0:
859  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
860  px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
861  px[0] *= exponent;
862  break;
863 
864  case 1:
865  if( exponent != 2.0 )
866  {
867  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
868  px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
869  px[0] *= exponent - 1.0;
870  px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
871  px[0] *= exponent;
872  // px[1] = py[1] * p * abs(tx[0])^(p-1)
873  px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
874  px[1] *= exponent;
875  }
876  else
877  {
878  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
879  px[0] = py[1] * tx[1] * SIGN(tx[0]);
880  px[0] += py[0] * REALABS(tx[0]);
881  px[0] *= 2.0;
882  // px[1] = py[1] * 2.0 * abs(tx[0])
883  px[1] = py[1] * REALABS(tx[0]);
884  px[1] *= 2.0;
885  }
886  break;
887 
888  default:
889  return false;
890  }
891 
892  return true;
893  }
894 
895  using CppAD::atomic_base<Type>::for_sparse_jac;
896 
897  /** computes sparsity of jacobian during a forward sweep
898  *
899  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
900  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
901  */
902  bool for_sparse_jac(
903  size_t q, /**< number of columns in R */
904  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
905  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
906  )
907  {
908  return univariate_for_sparse_jac(q, r, s);
909  }
910 
911  using CppAD::atomic_base<Type>::rev_sparse_jac;
912 
913  /** computes sparsity of jacobian during a reverse sweep
914  *
915  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
916  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
917  */
918  bool rev_sparse_jac(
919  size_t q, /**< number of rows in R */
920  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
921  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
922  )
923  {
924  return univariate_rev_sparse_jac(q, r, s);
925  }
926 
927  using CppAD::atomic_base<Type>::rev_sparse_hes;
928 
929  /** computes sparsity of hessian during a reverse sweep
930  *
931  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
932  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
933  */
934  bool rev_sparse_hes(
935  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
936  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
937  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
938  size_t q, /**< number of columns in S and R */
939  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
940  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
941  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
942  )
943  {
944  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
945  }
946 
947 };
948 
949 /** Specialization of atomic_signpower template for intervals */
950 template<>
951 class atomic_signpower<SCIPInterval> : public CppAD::atomic_base<SCIPInterval>
952 {
953 public:
954  atomic_signpower()
955  : CppAD::atomic_base<SCIPInterval>("signpowerint"),
956  exponent(0.0)
957  {
958  /* indicate that we want to use bool-based sparsity pattern */
959  this->option(CppAD::atomic_base<SCIPInterval>::bool_sparsity_enum);
960  }
961 
962 private:
963  /** exponent for use in next call to forward or reverse */
964  SCIP_Real exponent;
965 
966  /** stores exponent corresponding to next call to forward or reverse
967  *
968  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
969  */
970  virtual void set_id(size_t id)
971  {
972  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
973  }
974 
975  /** specialization of atomic_signpower::forward template for SCIPInterval
976  *
977  * @todo try to compute tighter resultants
978  */
979  bool forward(
980  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
981  size_t p, /**< highest order Taylor coefficient that we are evaluating */
982  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
983  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
984  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
985  CppAD::vector<SCIPInterval>& ty /**< vector to store taylor coefficients of y */
986  )
987  {
988  assert(exponent > 0.0);
989  assert(tx.size() >= p+1);
990  assert(ty.size() >= p+1);
991  assert(q <= p);
992 
993  if( vx.size() > 0 )
994  {
995  assert(vx.size() == 1);
996  assert(vy.size() == 1);
997  assert(p == 0);
998 
999  vy[0] = vx[0];
1000  }
1001 
1002  if( q == 0 /* q <= 0 && 0 <= p */ )
1003  {
1004  ty[0] = CppAD::signpow(tx[0], exponent);
1005  }
1006 
1007  if( q <= 1 && 1 <= p )
1008  {
1009  ty[1] = CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[1];
1010  ty[1] *= exponent;
1011  }
1012 
1013  if( q <= 2 && 2 <= p )
1014  {
1015  if( exponent != 2.0 )
1016  {
1017  ty[2] = CppAD::signpow(tx[0], exponent - 2.0) * CppAD::square(tx[1]);
1018  ty[2] *= (exponent - 1.0) / 2.0;
1019  ty[2] += CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[2];
1020  ty[2] *= exponent;
1021  }
1022  else
1023  {
1024  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
1025  ty[2] = CppAD::sign(tx[0]) * CppAD::square(tx[1]);
1026  ty[2] += 2.0 * CppAD::abs(tx[0]) * tx[2];
1027  }
1028  }
1029 
1030  /* higher order derivatives not implemented */
1031  if( p > 2 )
1032  return false;
1033 
1034  return true;
1035  }
1036 
1037  /** specialization of atomic_signpower::reverse template for SCIPInterval
1038  *
1039  * @todo try to compute tighter resultants
1040  */
1041  bool reverse(
1042  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1043  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
1044  const CppAD::vector<SCIPInterval>& ty, /**< values for taylor coefficients of y */
1045  CppAD::vector<SCIPInterval>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1046  const CppAD::vector<SCIPInterval>& py /**< values for partial derivatives of g(x) w.r.t. y */
1047  )
1048  { /*lint --e{715} */
1049  assert(exponent > 1);
1050  assert(px.size() >= p+1);
1051  assert(py.size() >= p+1);
1052  assert(tx.size() >= p+1);
1053 
1054  switch( p )
1055  {
1056  case 0:
1057  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
1058  px[0] = py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1059  px[0] *= exponent;
1060  break;
1061 
1062  case 1:
1063  if( exponent != 2.0 )
1064  {
1065  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
1066  px[0] = py[1] * tx[1] * CppAD::signpow(tx[0], exponent - 2.0);
1067  px[0] *= exponent - 1.0;
1068  px[0] += py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1069  px[0] *= exponent;
1070  // px[1] = py[1] * p * abs(tx[0])^(p-1)
1071  px[1] = py[1] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1072  px[1] *= exponent;
1073  }
1074  else
1075  {
1076  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
1077  px[0] = py[1] * tx[1] * CppAD::sign(tx[0]);
1078  px[0] += py[0] * CppAD::abs(tx[0]);
1079  px[0] *= 2.0;
1080  // px[1] = py[1] * 2.0 * abs(tx[0])
1081  px[1] = py[1] * CppAD::abs(tx[0]);
1082  px[1] *= 2.0;
1083  }
1084  break;
1085 
1086  default:
1087  return false;
1088  }
1089 
1090  return true;
1091  }
1092 
1093  using CppAD::atomic_base<SCIPInterval>::for_sparse_jac;
1094 
1095  /** computes sparsity of jacobian during a forward sweep
1096  *
1097  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1098  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1099  */
1100  bool for_sparse_jac(
1101  size_t q, /**< number of columns in R */
1102  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1103  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1104  )
1105  {
1106  return univariate_for_sparse_jac(q, r, s);
1107  }
1108 
1109  using CppAD::atomic_base<SCIPInterval>::rev_sparse_jac;
1110 
1111  /** computes sparsity of jacobian during a reverse sweep
1112  *
1113  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
1114  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1115  */
1116  bool rev_sparse_jac(
1117  size_t q, /**< number of rows in R */
1118  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
1119  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
1120  )
1121  {
1122  return univariate_rev_sparse_jac(q, r, s);
1123  }
1124 
1125  using CppAD::atomic_base<SCIPInterval>::rev_sparse_hes;
1126 
1127  /** computes sparsity of hessian during a reverse sweep
1128  *
1129  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
1130  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1131  */
1132  bool rev_sparse_hes(
1133  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1134  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1135  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1136  size_t q, /**< number of columns in S and R */
1137  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1138  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1139  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1140  )
1141  {
1142  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
1143  }
1144 };
1145 
1146 /** template for evaluation for signpower operator */
1147 template<class Type>
1148 static
1149 void evalSignPower(
1150  Type& resultant, /**< resultant */
1151  const Type& arg, /**< operand */
1152  SCIP_EXPR* expr /**< expression that holds the exponent */
1153  )
1154 {
1155  vector<Type> in(1, arg);
1156  vector<Type> out(1);
1157 
1158  static atomic_signpower<typename Type::value_type> sp;
1159  sp(in, out, (size_t)(void*)expr);
1160 
1161  resultant = out[0];
1162  return;
1163 }
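/* Note (illustration, not from the original file): the expression pointer is passed through the
 * extra size_t "id" argument of CppAD's atomic function call above; atomic_signpower::set_id()
 * casts it back to a SCIP_EXPR* to look up the exponent.  This relies on a pointer fitting into a
 * size_t and is the reason for the thread-safety remark in set_id().
 */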
1164 
1165 #else
1166 
1167 /** template for evaluation for signpower operator
1168  *
1169  * Only implemented for real numbers, thus gives error by default.
1170  */
1171 template<class Type>
1172 static
1173 void evalSignPower(
1174  Type& resultant, /**< resultant */
1175  const Type& arg, /**< operand */
1176  SCIP_EXPR* expr /**< expression that holds the exponent */
1177  )
1178 { /*lint --e{715}*/
1179  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1180  "evalSignPower()",
1181  "Error: SignPower not implemented for this value type"
1182  );
1183 }
1184 
1185 /** specialization of signpower evaluation for real numbers */
1186 template<>
1187 void evalSignPower(
1188  CppAD::AD<double>& resultant, /**< resultant */
1189  const CppAD::AD<double>& arg, /**< operand */
1190  SCIP_EXPR* expr /**< expression that holds the exponent */
1191  )
1192 {
1193  SCIP_Real exponent;
1194 
1195  exponent = SCIPexprGetSignPowerExponent(expr);
1196 
1197  if( arg == 0.0 )
1198  resultant = 0.0;
1199  else if( arg > 0.0 )
1200  resultant = pow( arg, exponent);
1201  else
1202  resultant = -pow(-arg, exponent);
1203 }
1204 
1205 #endif
1206 
1207 
1208 #ifndef NO_CPPAD_USER_ATOMIC
1209 
1210 template<class Type>
1211 SCIP_RETCODE exprEvalUser(
1212  SCIP_EXPR* expr,
1213  Type* x,
1214  Type& funcval,
1215  Type* gradient,
1216  Type* hessian
1217  )
1218 {
1219  return SCIPexprEvalUser(expr, x, &funcval, gradient, hessian);
1220 }
1221 
1222 template<>
1223 SCIP_RETCODE exprEvalUser(
1224  SCIP_EXPR* expr,
1225  SCIPInterval* x,
1226  SCIPInterval& funcval,
1227  SCIPInterval* gradient,
1228  SCIPInterval* hessian
1229  )
1230 {
1231  return SCIPexprEvalIntUser(expr, SCIPInterval::infinity, x, &funcval, gradient, hessian);
1232 }
1233 
1234 /** Automatic differentiation of user expression as CppAD user-atomic function.
1235  *
1236  * This class implements forward and reverse operations for a function given by a user expression for use within CppAD.
1237  */
1238 template<class Type>
1239 class atomic_userexpr : public CppAD::atomic_base<Type>
1240 {
1241 public:
1242  atomic_userexpr()
1243  : CppAD::atomic_base<Type>("userexpr"),
1244  expr(NULL)
1245  {
1246  /* indicate that we want to use bool-based sparsity pattern */
1247  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
1248  }
1249 
1250 private:
1251  /** user expression */
1252  SCIP_EXPR* expr;
1253 
1254  /** stores user expression corresponding to next call to forward or reverse
1255  *
1256  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
1257  */
1258  virtual void set_id(size_t id)
1259  {
1260  expr = (SCIP_EXPR*)(void*)id;
1261  assert(SCIPexprGetOperator(expr) == SCIP_EXPR_USER);
1262  }
1263 
1264  /** forward sweep of userexpr
1265  *
1266  * We follow http://www.coin-or.org/CppAD/Doc/atomic_forward.xml
1267  * Note that p and q are interchanged!
1268  *
1269  * For a scalar variable t, let
1270  * Y(t) = f(X(t))
1271  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1272  * where for x^i the i is an index, while for t^i the i is an exponent.
1273  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1274  *
1275  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1276  * y^0 = Y^(0)(0) = Y(0) = f(X(0)) = f(x^0)
1277  * y^1 = Y^(1)(0) = Y'(0) = f'(X(0)) * X'(0) = f'(x^0) * x^1
1278  * y^2 = 1/2 Y^(2)(0) = 1/2 Y''(0) = 1/2 X'(0) * f''(X(0)) X'(0) + 1/2 * f'(X(0)) * X''(0) = 1/2 x^1 * f''(x^0) * x^1 + f'(x^0) * x^2
1279  *
1280  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]), we get
1281  * ty[0] = y^0 = f(x^0) = f(tx[{1..n}*(p+1)])
1282  * ty[1] = y^1 = f'(x^0) * tx[{1..n}*(p+1)+1] = sum(i=1..n, grad[i] * tx[i*(p+1)+1]), where grad = f'(x^0)
1283  * ty[2] = 1/2 sum(i,j=1..n, x[i*(p+1)+1] * x[j*(p+1)+1] * hessian[i,j]) + sum(i=1..n, grad[i] * x[i*(p+1)+2])
1284  */
1285  bool forward(
1286  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
1287  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1288  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1289  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
1290  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1291  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
1292  )
1293  {
1294  assert(expr != NULL);
1295  assert(ty.size() == p+1);
1296  assert(q <= p);
1297 
1298  size_t n = tx.size() / (p+1);
1299  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1300  assert(n >= 1);
1301 
1302  if( vx.size() > 0 )
1303  {
1304  assert(vx.size() == n);
1305  assert(vy.size() == 1);
1306  assert(p == 0);
1307 
1308  /* y_0 is a variable if at least one of the x_i is a variable */
1309  vy[0] = false;
1310  for( size_t i = 0; i < n; ++i )
1311  if( vx[i] )
1312  {
1313  vy[0] = true;
1314  break;
1315  }
1316  }
1317 
1318  Type* x = new Type[n];
1319  Type* gradient = NULL;
1320  Type* hessian = NULL;
1321 
1322  if( q <= 2 && 1 <= p )
1323  gradient = new Type[n];
1324  if( q <= 2 && 2 <= p )
1325  hessian = new Type[n*n];
1326 
1327  for( size_t i = 0; i < n; ++i )
1328  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1329 
1330  if( exprEvalUser(expr, x, ty[0], gradient, hessian) != SCIP_OKAY )
1331  {
1332  delete[] x;
1333  delete[] gradient;
1334  delete[] hessian;
1335  return false;
1336  }
1337 
1338  if( gradient != NULL )
1339  {
1340  ty[1] = 0.0;
1341  for( size_t i = 0; i < n; ++i )
1342  ty[1] += gradient[i] * tx[i * (p+1) + 1];
1343  }
1344 
1345  if( hessian != NULL )
1346  {
1347  assert(gradient != NULL);
1348 
1349  ty[2] = 0.0;
1350  for( size_t i = 0; i < n; ++i )
1351  {
1352  for( size_t j = 0; j < n; ++j )
1353  ty[2] += 0.5 * hessian[i*n+j] * tx[i * (p+1) + 1] * tx[j * (p+1) + 1];
1354 
1355  ty[2] += gradient[i] * tx[i * (p+1) + 2];
1356  }
1357  }
1358 
1359  delete[] x;
1360  delete[] gradient;
1361  delete[] hessian;
1362 
1363  /* higher order derivatives not implemented */
1364  if( p > 2 )
1365  return false;
1366 
1367  return true;
1368  }
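   /* Worked example (illustration only, with a made-up user function): for f(x1,x2) = x1*x2 and
    * p = 2, take the Taylor coefficients x1 = (3, 1, 0) and x2 = (5, 0, 1), so tx = (3,1,0,5,0,1).
    * With gradient (5, 3) and hessian [[0,1],[1,0]], the sweep above gives ty[0] = 15,
    * ty[1] = 5*1 + 3*0 = 5, and ty[2] = 0 + (5*0 + 3*1) = 3, which are indeed the Taylor
    * coefficients of (3+t)*(5+t^2) = 15 + 5t + 3t^2 + t^3.
    */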
1369 
1370  /** reverse sweep of userexpr
1371  *
1372  * We follow http://www.coin-or.org/CppAD/Doc/atomic_reverse.xml
1373  * Note that there q is our p.
1374  *
1375  * For a scalar variable t, let
1376  * Y(t) = f(X(t))
1377  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1378  * where for x^i the i is an index, while for t^i the i is an exponent.
1379  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1380  *
1381  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1382  * Y(t) = y^0 + y^1 t^1 + y^2 t^2 + ...
1383  * y^0, y^1, ... are the taylor coefficients of f(x).
1384  *
1385  * Further, let F(x^0,..,x^p) by given as F^k(x) = y^k. Thus,
1386  * F^0(x) = y^0 = Y^(0)(0) = f(x^0)
1387  * F^1(x) = y^1 = Y^(1)(0) = f'(x^0) * x^1
1388  * F^2(x) = y^2 = 1/2 Y''(0) = 1/2 x^1 f''(x^0) x^1 + f'(x^0) x^2
1389  *
1390  * Given functions G: R^(p+1) -> R and H: R^(n*(p+1)) -> R, where H(x^0, x^1, .., x^p) = G(F(x^0,..,x^p)),
1391  * we have to return the value of \f$\partial H / \partial x^l, l = 0..p,\f$ in px. Therefore,
1392  * \f$
1393  * px^l = \partial H / \partial x^l
1394  * = sum(k=0..p, (\partial G / \partial y^k) * (\partial y^k / \partial x^l)
1395  * = sum(k=0..p, py[k] * (\partial F^k / \partial x^l)
1396  * \f$
1397  *
1398  * For p = 0, this means
1399  * \f$
1400  * px^0 = py[0] * \partial F^0 / \partial x^0
1401  * = py[0] * \partial f(x^0) / \partial x^0
1402  * = py[0] * f'(x^0)
1403  * \f$
1404  *
1405  * For p = 1, this means (for l = 0):
1406  * \f[
1407  * px^0 = py[0] * \partial F^0 / \partial x^0 + py[1] * \partial F^1 / \partial x^0
1408  * = py[0] * \partial f(x^0) / \partial x^0 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1409  * = py[0] * f'(x^0) + py[1] * f''(x^0) * x^1
1410  * \f]
1411  * and (for l=1):
1412  * \f[
1413  * px^1 = py[0] * \partial F^0 / \partial x^1 + py[1] * \partial F^1 / \partial x^1
1414  * = py[0] * \partial f(x^0) / \partial x^1 + py[1] * \partial (f'(x^0) * x^1) / \partial x^1
1415  * = py[0] * 0 + py[1] * f'(x^0)
1416  * \f]
1417  *
1418  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]) and
1419  * px^k = (px[k], px[(p+1)+k], px[2*(p+1)+k], ..., px[n*(p+1)+k]), we get
1420  * for p = 0:
1421  * px[i] = (px^0)_i = py[0] * grad[i]
1422  * for p = 1:
1423  * px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1424  * px[i*2+1] = (px^1)_i = py[1] * grad[i]
1425  */
1426  bool reverse(
1427  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1428  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1429  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
1430  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1431  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
1432  )
1433  {
1434  assert(expr != NULL);
1435  assert(px.size() == tx.size());
1436  assert(py.size() == p+1);
1437 
1438  size_t n = tx.size() / (p+1);
1439  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1440  assert(n >= 1);
1441 
1442  Type* x = new Type[n];
1443  Type funcval;
1444  Type* gradient = new Type[n];
1445  Type* hessian = NULL;
1446 
1447  if( p == 1 )
1448  hessian = new Type[n*n];
1449 
1450  for( size_t i = 0; i < n; ++i )
1451  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1452 
1453  if( exprEvalUser(expr, x, funcval, gradient, hessian) != SCIP_OKAY )
1454  {
1455  delete[] x;
1456  delete[] gradient;
1457  delete[] hessian;
1458  return false;
1459  }
1460 
1461  switch( p )
1462  {
1463  case 0:
1464  // px[j] = (px^0)_j = py[0] * grad[j]
1465  for( size_t i = 0; i < n; ++i )
1466  px[i] = py[0] * gradient[i];
1467  break;
1468 
1469  case 1:
1470  // px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1471  // px[i*2+1] = (px^1)_i = py[1] * grad[i]
1472  assert(hessian != NULL);
1473  for( size_t i = 0; i < n; ++i )
1474  {
1475  px[i*2+0] = py[0] * gradient[i]; /*lint !e835*/
1476  for( size_t j = 0; j < n; ++j )
1477  px[i*2+0] += py[1] * hessian[i+n*j] * tx[j*2+1]; /*lint !e835*/
1478 
1479  px[i*2+1] = py[1] * gradient[i];
1480  }
1481  break;
1482 
1483  default:
1484  return false;
1485  }
1486 
1487  return true;
1488  } /*lint !e715*/
1489 
1490  using CppAD::atomic_base<Type>::for_sparse_jac;
1491 
1492  /** computes sparsity of jacobian during a forward sweep
1493  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1494  * Since we assume f'(x) to be dense, the sparsity of S will be the sparsity of R.
1495  */
1496  bool for_sparse_jac(
1497  size_t q, /**< number of columns in R */
1498  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1499  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1500  )
1501  {
1502  assert(expr != NULL);
1503  assert(s.size() == q);
1504 
1505  size_t n = r.size() / q;
1506  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1507 
1508  // sparsity for S(x) = f'(x) * R
1509  for( size_t j = 0; j < q; j++ )
1510  {
1511  s[j] = false;
1512  for( size_t i = 0; i < n; i++ )
1513  s[j] |= r[i * q + j]; /*lint !e1786*/
1514  }
1515 
1516  return true;
1517  }
1518 
1519  using CppAD::atomic_base<Type>::rev_sparse_jac;
1520 
1521  /** computes sparsity of jacobian during a reverse sweep
1522  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1523  * Since we assume f'(x) to be dense, the sparsity of R will be the sparsity of S.
1524  */
1525  bool rev_sparse_jac(
1526  size_t q, /**< number of rows in R */
1527  const CppAD::vector<bool>& rt, /**< sparsity of R, rowwise */
1528  CppAD::vector<bool>& st /**< vector to store sparsity of S, rowwise */
1529  )
1530  {
1531  assert(expr != NULL);
1532  assert(rt.size() == q);
1533 
1534  size_t n = st.size() / q;
1535  assert(n == (size_t)SCIPexprGetNChildren(expr));
1536 
1537  // sparsity for S(x)^T = f'(x)^T * R^T
1538  for( size_t j = 0; j < q; j++ )
1539  for( size_t i = 0; i < n; i++ )
1540  st[i * q + j] = rt[j];
1541 
1542  return true;
1543  }
1544 
1545  using CppAD::atomic_base<Type>::rev_sparse_hes;
1546 
1547  /** computes sparsity of hessian during a reverse sweep
1548  * Assume V(x) = (g(f(x)))'' R for a function g:R->R and a matrix R.
1549  * We have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1550  */
1551  bool rev_sparse_hes(
1552  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1553  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1554  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1555  size_t q, /**< number of columns in S and R */
1556  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1557  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1558  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1559  )
1560  {
1561  assert(expr != NULL);
1562  size_t n = vx.size();
1563  assert((size_t)SCIPexprGetNChildren(expr) == n);
1564  assert(s.size() == 1);
1565  assert(t.size() == n);
1566  assert(r.size() == n * q);
1567  assert(u.size() == q);
1568  assert(v.size() == n * q);
1569 
1570  size_t i, j, k;
1571 
1572  // sparsity for T(x) = S(x) * f'(x)
1573  for( i = 0; i < n; ++i )
1574  t[i] = s[0];
1575 
1576  // V(x) = f'(x)^T * g''(y) * f'(x) * R + g'(y) * f''(x) * R
1577  // U(x) = g''(y) * f'(x) * R
1578  // S(x) = g'(y)
1579 
1580  // back propagate the sparsity for U
1581  for( j = 0; j < q; j++ )
1582  for( i = 0; i < n; i++ )
1583  v[ i * q + j] = u[j];
1584 
1585  // include forward Jacobian sparsity in Hessian sparsity
1586  // sparsity for g'(y) * f''(x) * R (Note f''(x) is assumed to be dense)
1587  if( s[0] )
1588  for( j = 0; j < q; j++ )
1589  for( i = 0; i < n; i++ )
1590  for( k = 0; k < n; ++k )
1591  v[ i * q + j] |= r[ k * q + j];
1592 
1593  return true;
1594  }
1595 
1596 };
1597 
1598 template<class Type>
1599 static
1600 void evalUser(
1601  Type& resultant, /**< resultant */
1602  const Type* args, /**< operands */
1603  SCIP_EXPR* expr /**< expression that holds the user expression */
1604  )
1605 {
1606  assert( args != 0 );
1607  vector<Type> in(args, args + SCIPexprGetNChildren(expr));
1608  vector<Type> out(1);
1609 
1610  static atomic_userexpr<typename Type::value_type> u;
1611  u(in, out, (size_t)(void*)expr);
1612 
1613  resultant = out[0];
1614  return;
1615 }
1616 
1617 #else
1618 
1619 template<class Type>
1620 static
1621 void evalUser(
1622  Type& resultant, /**< resultant */
1623  const Type* args, /**< operands */
1624  SCIP_EXPR* expr /**< expression that holds the user expression */
1625  )
1626 {
1627  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1628  "evalUser()",
1629  "Error: user expressions in CppAD not possible without CppAD user atomic facility"
1630  );
1631 }
1632 
1633 #endif
1634 
1635 /** template for evaluation for minimum operator
1636  *
1637  * Only implemented for real numbers, thus gives error by default.
1638  * @todo implement own userad function
1639  */
1640 template<class Type>
1641 static
1642 void evalMin(
1643  Type& resultant, /**< resultant */
1644  const Type& arg1, /**< first operand */
1645  const Type& arg2 /**< second operand */
1646  )
1647 { /*lint --e{715,1764}*/
1648  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1649  "evalMin()",
1650  "Error: Min not implemented for this value type"
1651  );
1652 }
1653 
1654 /** specialization of minimum evaluation for real numbers */
1655 template<>
1656 void evalMin(
1657  CppAD::AD<double>& resultant, /**< resultant */
1658  const CppAD::AD<double>& arg1, /**< first operand */
1659  const CppAD::AD<double>& arg2 /**< second operand */
1660  )
1661 {
1662  resultant = MIN(arg1, arg2);
1663 }
1664 
1665 /** template for evaluation for maximum operator
1666  *
1667  * Only implemented for real numbers, thus gives error by default.
1668  * @todo implement own userad function
1669  */
1670 template<class Type>
1671 static
1672 void evalMax(
1673  Type& resultant, /**< resultant */
1674  const Type& arg1, /**< first operand */
1675  const Type& arg2 /**< second operand */
1676  )
1677 { /*lint --e{715,1764}*/
1678  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1679  "evalMax()",
1680  "Error: Max not implemented for this value type"
1681  );
1682 }
1683 
1684 /** specialization of maximum evaluation for real numbers */
1685 template<>
1686 void evalMax(
1687  CppAD::AD<double>& resultant, /**< resultant */
1688  const CppAD::AD<double>& arg1, /**< first operand */
1689  const CppAD::AD<double>& arg2 /**< second operand */
1690  )
1691 {
1692  resultant = MAX(arg1, arg2);
1693 }
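/* Usage sketch (illustrative; a and b are assumed AD<double> values): for the real-valued
 * instantiations, evalMin/evalMax simply apply the MIN/MAX macros, so the branch that is taken
 * depends on the current values and is fixed into the tape; this is consistent with
 * analyzeTree() below marking trees containing min/max as requiring retaping on every evaluation.
 *
 *    CppAD::AD<double> a(1.0);
 *    CppAD::AD<double> b(2.0);
 *    CppAD::AD<double> m;
 *    evalMin(m, a, b);    // m == 1.0 at this point; a tape built here is only valid nearby
 *    evalMax(m, a, b);    // m == 2.0
 */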
1694 
1695 /** template for evaluation for square-root operator
1696  *
1697  * Default is to use the standard sqrt-function.
1698  */
1699 template<class Type>
1700 static
1701 void evalSqrt(
1702  Type& resultant, /**< resultant */
1703  const Type& arg /**< operand */
1704  )
1705 {
1706  resultant = sqrt(arg);
1707 }
1708 
1709 /** template for evaluation for absolute value operator */
1710 template<class Type>
1711 static
1712 void evalAbs(
1713  Type& resultant, /**< resultant */
1714  const Type& arg /**< operand */
1715  )
1716 {
1717  resultant = abs(arg);
1718 }
1719 
1720 /** specialization of absolute value evaluation for intervals
1721  *
1722  * Use sqrt(x^2) for now @todo implement own userad function.
1723  */
1724 template<>
1725 void evalAbs(
1726  CppAD::AD<SCIPInterval>& resultant, /**< resultant */
1727  const CppAD::AD<SCIPInterval>& arg /**< operand */
1728  )
1729 {
1730  vector<CppAD::AD<SCIPInterval> > in(1, arg);
1731  vector<CppAD::AD<SCIPInterval> > out(1);
1732 
1733  posintpower(in, out, 2);
1734 
1735  resultant = sqrt(out[0]);
1736 }
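/* Worked example (illustrative): for the interval x = [-2,3], posintpower(x,2) gives the image
 * [0,9] (naive interval multiplication x*x would give [-6,9]), and sqrt([0,9]) = [0,3], which is
 * exactly abs([-2,3]); squaring via posintpower thus keeps the resulting interval tight.
 */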
1737 
1738 /** integer power operation for arbitrary integer exponents */
1739 template<class Type>
1740 static
1741 void evalIntPower(
1742  Type& resultant, /**< resultant */
1743  const Type& arg, /**< operand */
1744  const int exponent /**< exponent */
1745  )
1746 {
1747  if( exponent > 1 )
1748  {
1749  vector<Type> in(1, arg);
1750  vector<Type> out(1);
1751 
1752  posintpower(in, out, exponent);
1753 
1754  resultant = out[0];
1755  return;
1756  }
1757 
1758  if( exponent < -1 )
1759  {
1760  vector<Type> in(1, arg);
1761  vector<Type> out(1);
1762 
1763  posintpower(in, out, -exponent);
1764 
1765  resultant = Type(1.0)/out[0];
1766  return;
1767  }
1768 
1769  if( exponent == 1 )
1770  {
1771  resultant = arg;
1772  return;
1773  }
1774 
1775  if( exponent == 0 )
1776  {
1777  resultant = Type(1.0);
1778  return;
1779  }
1780 
1781  assert(exponent == -1);
1782  resultant = Type(1.0)/arg;
1783 }
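/* Usage sketch (illustrative; x is an operand of some AD type Type): the cases above map as
 *
 *    Type val;
 *    evalIntPower(val, x,  3);   // val = posintpower(x, 3)       (i.e., x^3)
 *    evalIntPower(val, x, -3);   // val = 1 / posintpower(x, 3)   (i.e., x^-3)
 *    evalIntPower(val, x,  1);   // val = x
 *    evalIntPower(val, x,  0);   // val = 1
 *    evalIntPower(val, x, -1);   // val = 1 / x
 */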
1784 
1785 /** CppAD compatible evaluation of an expression for given arguments and parameters */
1786 template<class Type>
1787 static
1788 SCIP_RETCODE eval(
1789  SCIP_EXPR* expr, /**< expression */
1790  const vector<Type>& x, /**< values of variables */
1791  SCIP_Real* param, /**< values of parameters */
1792  Type& val /**< buffer to store expression value */
1793  )
1794 {
1795  Type* buf = 0;
1796 
1797  assert(expr != NULL);
1798 
1799  /* todo use SCIP_MAXCHILD_ESTIMATE as in expression.c */
1800 
1801  if( SCIPexprGetNChildren(expr) )
1802  {
1803  if( BMSallocMemoryArray(&buf, SCIPexprGetNChildren(expr)) == NULL ) /*lint !e666*/
1804  return SCIP_NOMEMORY;
1805 
1806  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1807  {
1808  SCIP_CALL( eval(SCIPexprGetChildren(expr)[i], x, param, buf[i]) );
1809  }
1810  }
1811 
1812  switch(SCIPexprGetOperator(expr))
1813  {
1814  case SCIP_EXPR_VARIDX:
1815  assert(SCIPexprGetOpIndex(expr) < (int)x.size());
1816  val = x[SCIPexprGetOpIndex(expr)];
1817  break;
1818 
1819  case SCIP_EXPR_CONST:
1820  val = SCIPexprGetOpReal(expr);
1821  break;
1822 
1823  case SCIP_EXPR_PARAM:
1824  assert(param != NULL);
1825  val = param[SCIPexprGetOpIndex(expr)];
1826  break;
1827 
1828  case SCIP_EXPR_PLUS:
1829  assert( buf != 0 );
1830  val = buf[0] + buf[1];
1831  break;
1832 
1833  case SCIP_EXPR_MINUS:
1834  assert( buf != 0 );
1835  val = buf[0] - buf[1];
1836  break;
1837 
1838  case SCIP_EXPR_MUL:
1839  assert( buf != 0 );
1840  val = buf[0] * buf[1];
1841  break;
1842 
1843  case SCIP_EXPR_DIV:
1844  assert( buf != 0 );
1845  val = buf[0] / buf[1];
1846  break;
1847 
1848  case SCIP_EXPR_SQUARE:
1849  assert( buf != 0 );
1850  evalIntPower(val, buf[0], 2);
1851  break;
1852 
1853  case SCIP_EXPR_SQRT:
1854  assert( buf != 0 );
1855  evalSqrt(val, buf[0]);
1856  break;
1857 
1858  case SCIP_EXPR_REALPOWER:
1859  assert( buf != 0 );
1860  val = CppAD::pow(buf[0], SCIPexprGetRealPowerExponent(expr));
1861  break;
1862 
1863  case SCIP_EXPR_INTPOWER:
1864  assert( buf != 0 );
1865  evalIntPower(val, buf[0], SCIPexprGetIntPowerExponent(expr));
1866  break;
1867 
1868  case SCIP_EXPR_SIGNPOWER:
1869  assert( buf != 0 );
1870  evalSignPower(val, buf[0], expr);
1871  break;
1872 
1873  case SCIP_EXPR_EXP:
1874  assert( buf != 0 );
1875  val = exp(buf[0]);
1876  break;
1877 
1878  case SCIP_EXPR_LOG:
1879  assert( buf != 0 );
1880  val = log(buf[0]);
1881  break;
1882 
1883  case SCIP_EXPR_SIN:
1884  assert( buf != 0 );
1885  val = sin(buf[0]);
1886  break;
1887 
1888  case SCIP_EXPR_COS:
1889  assert( buf != 0 );
1890  val = cos(buf[0]);
1891  break;
1892 
1893  case SCIP_EXPR_TAN:
1894  assert( buf != 0 );
1895  val = tan(buf[0]);
1896  break;
1897 #ifdef SCIP_DISABLED_CODE /* these operators are currently disabled */
1898  case SCIP_EXPR_ERF:
1899  assert( buf != 0 );
1900  val = erf(buf[0]);
1901  break;
1902 
1903  case SCIP_EXPR_ERFI:
1904  return SCIP_ERROR;
1905 #endif
1906  case SCIP_EXPR_MIN:
1907  assert( buf != 0 );
1908  evalMin(val, buf[0], buf[1]);
1909  break;
1910 
1911  case SCIP_EXPR_MAX:
1912  assert( buf != 0 );
1913  evalMax(val, buf[0], buf[1]);
1914  break;
1915 
1916  case SCIP_EXPR_ABS:
1917  assert( buf != 0 );
1918  evalAbs(val, buf[0]);
1919  break;
1920 
1921  case SCIP_EXPR_SIGN:
1922  assert( buf != 0 );
1923  val = sign(buf[0]);
1924  break;
1925 
1926  case SCIP_EXPR_SUM:
1927  assert( buf != 0 );
1928  val = 0.0;
1929  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1930  val += buf[i];
1931  break;
1932 
1933  case SCIP_EXPR_PRODUCT:
1934  assert( buf != 0 );
1935  val = 1.0;
1936  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1937  val *= buf[i];
1938  break;
1939 
1940  case SCIP_EXPR_LINEAR:
1941  {
1942  SCIP_Real* coefs;
1943 
1944  coefs = SCIPexprGetLinearCoefs(expr);
1945  assert(coefs != NULL || SCIPexprGetNChildren(expr) == 0);
1946 
1947  assert( buf != 0 );
1948  val = SCIPexprGetLinearConstant(expr);
1949  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1950  val += coefs[i] * buf[i]; /*lint !e613*/
1951  break;
1952  }
1953 
1954  case SCIP_EXPR_QUADRATIC:
1955  {
1956  SCIP_Real* lincoefs;
1957  SCIP_QUADELEM* quadelems;
1958  int nquadelems;
1959  SCIP_Real sqrcoef;
1960  Type lincoef;
1961  vector<Type> in(1);
1962  vector<Type> out(1);
1963 
1964  assert( buf != 0 );
1965 
1966  lincoefs = SCIPexprGetQuadLinearCoefs(expr);
1967  nquadelems = SCIPexprGetNQuadElements(expr);
1968  quadelems = SCIPexprGetQuadElements(expr);
1969  assert(quadelems != NULL || nquadelems == 0);
1970 
1971  SCIPexprSortQuadElems(expr);
1972 
1973  val = SCIPexprGetQuadConstant(expr);
1974 
1975  /* for each argument, we collect its linear coefficient from lincoefs, its square coefficient, and all factors from bilinear terms;
1976  * then we compute the term sqrcoef*x^2 + lincoef*x and add it to the result */
1977  int i = 0;
1978  for( int argidx = 0; argidx < SCIPexprGetNChildren(expr); ++argidx )
1979  {
1980  if( i == nquadelems || quadelems[i].idx1 > argidx ) /*lint !e613*/
1981  {
1982  /* there are no quadratic terms with argidx in its first argument, that should be easy to handle */
1983  if( lincoefs != NULL )
1984  val += lincoefs[argidx] * buf[argidx];
1985  continue;
1986  }
1987 
1988  sqrcoef = 0.0;
1989  lincoef = lincoefs != NULL ? lincoefs[argidx] : 0.0;
1990 
1991  assert(i < nquadelems && quadelems[i].idx1 == argidx); /*lint !e613*/
1992  do
1993  {
1994  if( quadelems[i].idx2 == argidx ) /*lint !e613*/
1995  sqrcoef += quadelems[i].coef; /*lint !e613*/
1996  else
1997  lincoef += quadelems[i].coef * buf[quadelems[i].idx2]; /*lint !e613*/
1998  ++i;
1999  } while( i < nquadelems && quadelems[i].idx1 == argidx ); /*lint !e613*/
2000  assert(i == nquadelems || quadelems[i].idx1 > argidx); /*lint !e613*/
2001 
2002  /* this is not as good as what we can get from SCIPintervalQuad, but easy to implement */
2003  if( sqrcoef != 0.0 )
2004  {
2005  in[0] = buf[argidx];
2006  posintpower(in, out, 2);
2007  val += sqrcoef * out[0];
2008  }
2009 
2010  val += lincoef * buf[argidx];
2011  }
2012  assert(i == nquadelems);
2013 
2014  break;
2015  }
2016 
2017  case SCIP_EXPR_POLYNOMIAL:
2018  {
2019  SCIP_EXPRDATA_MONOMIAL** monomials;
2020  Type childval;
2021  Type monomialval;
2022  SCIP_Real exponent;
2023  int nmonomials;
2024  int nfactors;
2025  int* childidxs;
2026  SCIP_Real* exponents;
2027  int i;
2028  int j;
2029 
2030  assert( buf != 0 );
2031 
2032  val = SCIPexprGetPolynomialConstant(expr);
2033 
2034  nmonomials = SCIPexprGetNMonomials(expr);
2035  monomials = SCIPexprGetMonomials(expr);
2036 
2037  for( i = 0; i < nmonomials; ++i )
2038  {
2039  nfactors = SCIPexprGetMonomialNFactors(monomials[i]);
2040  childidxs = SCIPexprGetMonomialChildIndices(monomials[i]);
2041  exponents = SCIPexprGetMonomialExponents(monomials[i]);
2042  monomialval = SCIPexprGetMonomialCoef(monomials[i]);
2043 
2044  for( j = 0; j < nfactors; ++j )
2045  {
2046  assert(childidxs[j] >= 0);
2047  assert(childidxs[j] < SCIPexprGetNChildren(expr));
2048 
2049  childval = buf[childidxs[j]];
2050  exponent = exponents[j];
2051 
2052  /* cover some special exponents separately to avoid calling expensive pow function */
2053  if( exponent == 0.0 )
2054  continue;
2055  if( exponent == 1.0 )
2056  {
2057  monomialval *= childval;
2058  continue;
2059  }
2060  if( (int)exponent == exponent )
2061  {
2062  Type tmp;
2063  evalIntPower(tmp, childval, (int)exponent);
2064  monomialval *= tmp;
2065  continue;
2066  }
2067  if( exponent == 0.5 )
2068  {
2069  Type tmp;
2070  evalSqrt(tmp, childval);
2071  monomialval *= tmp;
2072  continue;
2073  }
2074  monomialval *= pow(childval, exponent);
2075  }
2076 
2077  val += monomialval;
2078  }
2079 
2080  break;
2081  }
2082 
2083  case SCIP_EXPR_USER:
2084  evalUser(val, buf, expr);
2085  break;
2086 
2087  case SCIP_EXPR_LAST:
2088  default:
2089  BMSfreeMemoryArrayNull(&buf);
2090  return SCIP_ERROR;
2091  }
2092 
2093  BMSfreeMemoryArrayNull(&buf);
2094 
2095  return SCIP_OKAY;
2096 }
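/* Taping sketch (illustrative; root, tree, and n are assumed to be available): this is how
 * eval() is driven by SCIPexprintEval()/SCIPexprintEvalInt() below: the variable vector is
 * declared independent, the tree is evaluated on it, and the result becomes the dependent of a
 * CppAD tape that can then be reused for Forward/Jacobian/Hessian sweeps.
 *
 *    vector< CppAD::AD<double> > X(n);   // one AD variable per tree variable
 *    vector< CppAD::AD<double> > Y(1);   // function value
 *    CppAD::Independent(X);
 *    SCIP_CALL( eval(root, X, SCIPexprtreeGetParamVals(tree), Y[0]) );
 *    CppAD::ADFun<double> f;
 *    f.Dependent(X, Y);
 */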
2097 
2098 /** analyzes an expression tree to determine whether it requires retaping on every evaluation
2099  *
2100  * This may be the case if the evaluation sequence depends on values of operands (e.g., in case of abs, sign, signpower, ...).
2101  */
2102 static
2103 void analyzeTree(
2104  SCIP_EXPRINTDATA* data, /**< expression interpreter data */
2105  SCIP_EXPR* expr /**< expression to be analyzed */
2106  )
2107 {
2108  assert(expr != NULL);
2109  assert(SCIPexprGetChildren(expr) != NULL || SCIPexprGetNChildren(expr) == 0);
2110 
2111  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
2112  analyzeTree(data, SCIPexprGetChildren(expr)[i]);
2113 
2114  switch( SCIPexprGetOperator(expr) )
2115  {
2116  case SCIP_EXPR_MIN:
2117  case SCIP_EXPR_MAX:
2118  case SCIP_EXPR_ABS:
2119 #ifdef NO_CPPAD_USER_ATOMIC
2120  case SCIP_EXPR_SIGNPOWER:
2121 #endif
2122  data->need_retape_always = true;
2123  break;
2124 
2125  case SCIP_EXPR_USER:
2126  data->userevalcapability &= SCIPexprGetUserEvalCapability(expr);
2127  break;
2128 
2129  default: ;
2130  } /*lint !e788*/
2131 
2132 }
2133 
2134 /** replacement for CppAD's default error handler
2135  *
2136  * In debug mode, CppAD gives an error when an evaluation contains a NaN.
2137  * We do not want to stop execution in such a case, since the calling routine should check for NaNs and decide what to do.
2138  * Since we cannot ignore only this particular error, we ignore all errors.
2139  * @todo find a way to check whether the error corresponds to a nan and communicate this back
2140  */
2141 static
2142 void cppaderrorcallback(
2143  bool known, /**< is the error from a known source? */
2144  int line, /**< line where error occurred */
2145  const char* file, /**< file where error occurred */
2146  const char* cond, /**< error condition */
2147  const char* msg /**< error message */
2148  )
2149 {
2150  SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, cond);
2151 }
2152 
2153 /* install our error handler */
2154 static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
2155 
2156 /** gets name and version of expression interpreter */
2157 const char* SCIPexprintGetName(void)
2158 {
2159  return CPPAD_PACKAGE_STRING;
2160 }
2161 
2162 /** gets descriptive text of expression interpreter */
2163 const char* SCIPexprintGetDesc(void)
2164 {
2165  return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (www.coin-or.org/CppAD)";
2166 }
2167 
2168 /** gets capabilities of expression interpreter (using bitflags) */
2169 SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
2170  void
2171  )
2172 {
2173  return SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_INTFUNCVALUE |
2174  SCIP_EXPRINTCAPABILITY_GRADIENT | SCIP_EXPRINTCAPABILITY_INTGRADIENT |
2175  SCIP_EXPRINTCAPABILITY_HESSIAN;
2176 }
2177 
2178 /** creates an expression interpreter object */
2179 SCIP_RETCODE SCIPexprintCreate(
2180  BMS_BLKMEM* blkmem, /**< block memory data structure */
2181  SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
2182  )
2183 {
2184  assert(blkmem != NULL);
2185  assert(exprint != NULL);
2186 
2187  if( BMSallocMemory(exprint) == NULL )
2188  return SCIP_NOMEMORY;
2189 
2190  (*exprint)->blkmem = blkmem;
2191 
2192  return SCIP_OKAY;
2193 }
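/* Usage sketch (illustrative; block memory blkmem and an expression tree `tree` with two
 * variables are assumed to exist): typical lifecycle of an expression interpreter.
 *
 *    SCIP_EXPRINT* exprint;
 *    SCIP_Real x[2] = { 1.0, 2.0 };
 *    SCIP_Real val;
 *
 *    SCIP_CALL( SCIPexprintCreate(blkmem, &exprint) );
 *    SCIP_CALL( SCIPexprintCompile(exprint, tree) );
 *    SCIP_CALL( SCIPexprintEval(exprint, tree, x, &val) );
 *    SCIP_CALL( SCIPexprintFree(&exprint) );
 */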
2194 
2195 /** frees an expression interpreter object */
2196 SCIP_RETCODE SCIPexprintFree(
2197  SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
2198  )
2199 {
2200  assert( exprint != NULL);
2201  assert(*exprint != NULL);
2202 
2203  BMSfreeMemory(exprint);
2204 
2205  return SCIP_OKAY;
2206 }
2207 
2208 /** compiles an expression tree and stores compiled data in expression tree */
2209 SCIP_RETCODE SCIPexprintCompile(
2210  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2211  SCIP_EXPRTREE* tree /**< expression tree */
2212  )
2213 { /*lint --e{429} */
2214  assert(tree != NULL);
2215 
2216  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2217  if (!data)
2218  {
2219  data = new SCIP_EXPRINTDATA();
2220  assert( data != NULL );
2221  SCIPexprtreeSetInterpreterData(tree, data);
2222  SCIPdebugMessage("set interpreter data in tree %p to %p\n", (void*)tree, (void*)data);
2223  }
2224  else
2225  {
2226  data->need_retape = true;
2227  data->int_need_retape = true;
2228  }
2229 
2230  int n = SCIPexprtreeGetNVars(tree);
2231 
2232  data->X.resize(n);
2233  data->x.resize(n);
2234  data->Y.resize(1);
2235 
2236  data->int_X.resize(n);
2237  data->int_x.resize(n);
2238  data->int_Y.resize(1);
2239 
2240  if( data->root != NULL )
2241  {
2242  SCIPexprFreeDeep(exprint->blkmem, &data->root);
2243  }
2244 
2245  SCIP_EXPR* root = SCIPexprtreeGetRoot(tree);
2246 
2247  SCIP_CALL( SCIPexprCopyDeep(exprint->blkmem, &data->root, root) );
2248 
2249  data->blkmem = exprint->blkmem;
2250 
2251  analyzeTree(data, data->root);
2252 
2253  return SCIP_OKAY;
2254 }
2255 
2256 
2257 /** gives the capability to evaluate an expression by the expression interpreter
2258  *
2259  * For user-given expressions, higher-order derivatives may not be available for the user expression,
2260  * even if the expression interpreter could handle them. This method makes it possible to recognize, e.g.,
2261  * that the Hessian for an expression is not available because it contains a user expression that does not
2262  * provide Hessians.
2263  */
2264 SCIP_EXPRINTCAPABILITY SCIPexprintGetExprtreeCapability(
2265  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2266  SCIP_EXPRTREE* tree /**< expression tree */
2267  )
2268 {
2269  assert(tree != NULL);
2270 
2271  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2272  assert(data != NULL);
2273 
2274  return data->userevalcapability;
2275 }/*lint !e715*/
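/* Usage sketch (illustrative; exprint, tree, x, val, and hessian are assumed to exist):
 * check a capability bit before requesting a Hessian, e.g., because the tree may contain a
 * user expression without Hessian support.
 *
 *    if( SCIPexprintGetExprtreeCapability(exprint, tree) & SCIP_EXPRINTCAPABILITY_HESSIAN )
 *    {
 *       SCIP_CALL( SCIPexprintHessianDense(exprint, tree, x, TRUE, &val, hessian) );
 *    }
 */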
2276 
2277 /** frees interpreter data */
2278 SCIP_RETCODE SCIPexprintFreeData(
2279  SCIP_EXPRINTDATA** interpreterdata /**< interpreter data that should be freed */
2280  )
2281 {
2282  assert( interpreterdata != NULL);
2283  assert(*interpreterdata != NULL);
2284 
2285  if( (*interpreterdata)->root != NULL )
2286  SCIPexprFreeDeep((*interpreterdata)->blkmem, &(*interpreterdata)->root);
2287 
2288  delete *interpreterdata;
2289  *interpreterdata = NULL;
2290 
2291  return SCIP_OKAY;
2292 }
2293 
2294 /** notify expression interpreter that a new parameterization is used
2295  *
2296  * This probably causes retaping by AD algorithms.
2297  */
2298 SCIP_RETCODE SCIPexprintNewParametrization(
2299  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2300  SCIP_EXPRTREE* tree /**< expression tree */
2301  )
2302 {
2303  assert(exprint != NULL);
2304  assert(tree != NULL);
2305 
2306  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2307  if( data != NULL )
2308  {
2309  data->need_retape = true;
2310  data->int_need_retape = true;
2311  }
2312 
2313  return SCIP_OKAY;
2314 }
2315 
2316 /** evaluates an expression tree */
2317 SCIP_RETCODE SCIPexprintEval(
2318  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2319  SCIP_EXPRTREE* tree, /**< expression tree */
2320  SCIP_Real* varvals, /**< values of variables */
2321  SCIP_Real* val /**< buffer to store value */
2322  )
2323 {
2324  SCIP_EXPRINTDATA* data;
2325 
2326  assert(exprint != NULL);
2327  assert(tree != NULL);
2328  assert(varvals != NULL);
2329  assert(val != NULL);
2330 
2331  data = SCIPexprtreeGetInterpreterData(tree);
2332  assert(data != NULL);
2333  assert(SCIPexprtreeGetNVars(tree) == (int)data->X.size());
2334  assert(SCIPexprtreeGetRoot(tree) != NULL);
2335 
2336  int n = SCIPexprtreeGetNVars(tree);
2337 
2338  if( n == 0 )
2339  {
2340  SCIP_CALL( SCIPexprtreeEval(tree, NULL, val) );
2341  return SCIP_OKAY;
2342  }
2343 
2344  if( data->need_retape_always || data->need_retape )
2345  {
2346  for( int i = 0; i < n; ++i )
2347  {
2348  data->X[i] = varvals[i];
2349  data->x[i] = varvals[i];
2350  }
2351 
2352  CppAD::Independent(data->X);
2353 
2354  if( data->root != NULL )
2355  SCIP_CALL( eval(data->root, data->X, SCIPexprtreeGetParamVals(tree), data->Y[0]) );
2356  else
2357  data->Y[0] = 0.0;
2358 
2359  data->f.Dependent(data->X, data->Y);
2360 
2361  data->val = Value(data->Y[0]);
2362  SCIPdebugMessage("Eval retaped and computed value %g\n", data->val);
2363 
2364  // the following is required if the gradient shall be computed by a reverse sweep later
2365  // data->val = data->f.Forward(0, data->x)[0];
2366 
2367  data->need_retape = false;
2368  }
2369  else
2370  {
2371  assert((int)data->x.size() >= n);
2372  for( int i = 0; i < n; ++i )
2373  data->x[i] = varvals[i];
2374 
2375  data->val = data->f.Forward(0, data->x)[0]; /*lint !e1793*/
2376  SCIPdebugMessage("Eval used forward sweep to compute value %g\n", data->val);
2377  }
2378 
2379  *val = data->val;
2380 
2381  return SCIP_OKAY;
2382 }
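/* Usage sketch (illustrative; tree is assumed compiled for two variables): the first call
 * (and any call after a new parametrization) retapes; later calls at new points reuse the tape
 * via a zero-order forward sweep, unless analyzeTree() marked the tree as always needing retaping.
 *
 *    SCIP_Real x[2] = { 0.5, 2.0 };
 *    SCIP_Real val;
 *    SCIP_CALL( SCIPexprintEval(exprint, tree, x, &val) );   // retapes and evaluates
 *    x[0] = 0.6;
 *    SCIP_CALL( SCIPexprintEval(exprint, tree, x, &val) );   // forward sweep on existing tape
 */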
2383 
2384 /** evaluates an expression tree on intervals */
2385 SCIP_RETCODE SCIPexprintEvalInt(
2386  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2387  SCIP_EXPRTREE* tree, /**< expression tree */
2388  SCIP_Real infinity, /**< value for infinity */
2389  SCIP_INTERVAL* varvals, /**< interval values of variables */
2390  SCIP_INTERVAL* val /**< buffer to store interval value of expression */
2391  )
2392 {
2393  SCIP_EXPRINTDATA* data;
2394 
2395  assert(exprint != NULL);
2396  assert(tree != NULL);
2397  assert(varvals != NULL);
2398  assert(val != NULL);
2399 
2400  data = SCIPexprtreeGetInterpreterData(tree);
2401  assert(data != NULL);
2402  assert(SCIPexprtreeGetNVars(tree) == (int)data->int_X.size());
2403  assert(SCIPexprtreeGetRoot(tree) != NULL);
2404 
2405  int n = SCIPexprtreeGetNVars(tree);
2406 
2407  if( n == 0 )
2408  {
2409  SCIP_CALL( SCIPexprtreeEvalInt(tree, infinity, NULL, val) );
2410  return SCIP_OKAY;
2411  }
2412 
2413  SCIPInterval::infinity = infinity;
2414 
2415  if( data->int_need_retape || data->need_retape_always )
2416  {
2417  for( int i = 0; i < n; ++i )
2418  {
2419  data->int_X[i] = varvals[i];
2420  data->int_x[i] = varvals[i];
2421  }
2422 
2423  CppAD::Independent(data->int_X);
2424 
2425  if( data->root != NULL )
2426  SCIP_CALL( eval(data->root, data->int_X, SCIPexprtreeGetParamVals(tree), data->int_Y[0]) );
2427  else
2428  data->int_Y[0] = 0.0;
2429 
2430  data->int_f.Dependent(data->int_X, data->int_Y);
2431 
2432  data->int_val = Value(data->int_Y[0]);
2433 
2434  data->int_need_retape = false;
2435  }
2436  else
2437  {
2438  assert((int)data->int_x.size() >= n);
2439  for( int i = 0; i < n; ++i )
2440  data->int_x[i] = varvals[i];
2441 
2442  data->int_val = data->int_f.Forward(0, data->int_x)[0]; /*lint !e1793*/
2443  }
2444 
2445  *val = data->int_val;
2446 
2447  return SCIP_OKAY;
2448 }
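/* Usage sketch (illustrative; tree is assumed compiled for one variable, and
 * SCIPintervalSetBounds() comes from scip/intervalarith.h): interval evaluation uses its own
 * tape (int_f), kept separately from the point-evaluation tape.
 *
 *    SCIP_INTERVAL xi;
 *    SCIP_INTERVAL vali;
 *    SCIPintervalSetBounds(&xi, -1.0, 2.0);
 *    SCIP_CALL( SCIPexprintEvalInt(exprint, tree, SCIP_DEFAULT_INFINITY, &xi, &vali) );
 */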
2449 
2450 /** computes value and gradient of an expression tree */
2451 SCIP_RETCODE SCIPexprintGrad(
2452  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2453  SCIP_EXPRTREE* tree, /**< expression tree */
2454  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2455  SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
2456  SCIP_Real* val, /**< buffer to store expression value */
2457  SCIP_Real* gradient /**< buffer to store expression gradient, needs to have length at least SCIPexprtreeGetNVars(tree) */
2458  )
2459 {
2460  assert(exprint != NULL);
2461  assert(tree != NULL);
2462  assert(varvals != NULL || new_varvals == FALSE);
2463  assert(val != NULL);
2464  assert(gradient != NULL);
2465 
2466  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2467  assert(data != NULL);
2468 
2469  if( new_varvals )
2470  {
2471  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2472  }
2473  else
2474  *val = data->val;
2475 
2476  int n = SCIPexprtreeGetNVars(tree);
2477 
2478  if( n == 0 )
2479  return SCIP_OKAY;
2480 
2481  vector<double> jac(data->f.Jacobian(data->x));
2482 
2483  for( int i = 0; i < n; ++i )
2484  gradient[i] = jac[i];
2485 
2486 /* disable debug output since we have no message handler here
2487 #ifdef SCIP_DEBUG
2488  SCIPdebugMessage("Grad for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2489  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2490  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t %g", gradient[i]); printf("\n");
2491 #endif
2492 */
2493 
2494  return SCIP_OKAY;
2495 }
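/* Usage sketch (illustrative; tree is assumed compiled for two variables): value and gradient
 * at a point; with new_varvals == FALSE the last evaluation point is reused and varvals may be NULL.
 *
 *    SCIP_Real x[2] = { 0.5, 2.0 };
 *    SCIP_Real val;
 *    SCIP_Real grad[2];
 *    SCIP_CALL( SCIPexprintGrad(exprint, tree, x, TRUE, &val, grad) );
 */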
2496 
2497 /** computes interval value and interval gradient of an expression tree */
2498 SCIP_RETCODE SCIPexprintGradInt(
2499  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2500  SCIP_EXPRTREE* tree, /**< expression tree */
2501  SCIP_Real infinity, /**< value for infinity */
2502  SCIP_INTERVAL* varvals, /**< interval values of variables, can be NULL if new_varvals is FALSE */
2503  SCIP_Bool new_varvals, /**< have variable interval values changed since last call to an interval evaluation routine? */
2504  SCIP_INTERVAL* val, /**< buffer to store expression interval value */
2505  SCIP_INTERVAL* gradient /**< buffer to store expression interval gradient, needs to have length at least SCIPexprtreeGetNVars(tree) */
2506  )
2507 {
2508  assert(exprint != NULL);
2509  assert(tree != NULL);
2510  assert(varvals != NULL || new_varvals == false);
2511  assert(val != NULL);
2512  assert(gradient != NULL);
2513 
2514  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2515  assert(data != NULL);
2516 
2517  if (new_varvals)
2518  SCIP_CALL( SCIPexprintEvalInt(exprint, tree, infinity, varvals, val) );
2519  else
2520  *val = data->int_val;
2521 
2522  int n = SCIPexprtreeGetNVars(tree);
2523 
2524  if( n == 0 )
2525  return SCIP_OKAY;
2526 
2527  vector<SCIPInterval> jac(data->int_f.Jacobian(data->int_x));
2528 
2529  for (int i = 0; i < n; ++i)
2530  gradient[i] = jac[i];
2531 
2532 /* disable debug output since we have no message handler here
2533 #ifdef SCIP_DEBUG
2534  SCIPdebugMessage("GradInt for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2535  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(data->int_x[i]), SCIPintervalGetSup(data->int_x[i])); printf("\n");
2536  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(gradient[i]), SCIPintervalGetSup(gradient[i])); printf("\n");
2537 #endif
2538 */
2539 
2540  return SCIP_OKAY;
2541 }
2542 
2543 /** gives sparsity pattern of hessian
2544  *
2545  * NOTE: this function might be replaced later by something nicer.
2546  * Since the AD code might need to do a forward sweep, you should pass variable values in here.
2547  */
2548 SCIP_RETCODE SCIPexprintHessianSparsityDense(
2549  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2550  SCIP_EXPRTREE* tree, /**< expression tree */
2551  SCIP_Real* varvals, /**< values of variables */
2552  SCIP_Bool* sparsity /**< buffer to store sparsity pattern of Hessian, sparsity[i+n*j] indicates whether entry (i,j) is nonzero in the hessian */
2553  )
2554 {
2555  assert(exprint != NULL);
2556  assert(tree != NULL);
2557  assert(varvals != NULL);
2558  assert(sparsity != NULL);
2559 
2560  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2561  assert(data != NULL);
2562 
2563  int n = SCIPexprtreeGetNVars(tree);
2564  if( n == 0 )
2565  return SCIP_OKAY;
2566 
2567  int nn = n*n;
2568 
2569  if( data->need_retape_always )
2570  {
2571  // @todo can we do something better here, e.g., by looking at the expression tree ourselves?
2572 
2573  for( int i = 0; i < nn; ++i )
2574  sparsity[i] = TRUE;
2575 
2576 /* disable debug output since we have no message handler here
2577 #ifdef SCIP_DEBUG
2578  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2579  SCIPdebugMessage("sparsity = all elements, due to discontinuities\n");
2580 #endif
2581 */
2582 
2583  return SCIP_OKAY;
2584  }
2585 
2586  if( data->need_retape )
2587  {
2588  SCIP_Real val;
2589  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &val) );
2590  }
2591 
2592  SCIPdebugMessage("calling ForSparseJac\n");
2593 
2594  vector<bool> r(nn, false);
2595  for (int i = 0; i < n; ++i)
2596  r[i*n+i] = true; /*lint !e647 !e1793*/
2597  (void) data->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
2598 
2599  SCIPdebugMessage("calling RevSparseHes\n");
2600 
2601  vector<bool> s(1, true);
2602  vector<bool> sparsehes(data->f.RevSparseHes(n, s));
2603 
2604  for( int i = 0; i < nn; ++i )
2605  sparsity[i] = sparsehes[i];
2606 
2607 /* disable debug output since we have no message handler here
2608 #ifdef SCIP_DEBUG
2609  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2610  SCIPdebugMessage("sparsity ="); for (int i = 0; i < n; ++i) for (int j = 0; j < n; ++j) if (sparsity[i*n+j]) printf(" (%d,%d)", i, j); printf("\n");
2611 #endif
2612 */
2613 
2614  return SCIP_OKAY;
2615 }
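/* Usage sketch (illustrative, inside some SCIP_RETCODE function; exprint, tree, x, and n are
 * assumed to exist): the sparsity buffer is dense of size n*n, with sparsity[i+n*j] telling
 * whether Hessian entry (i,j) can be nonzero.
 *
 *    SCIP_Bool* sparsity;
 *    if( BMSallocMemoryArray(&sparsity, n*n) == NULL )
 *       return SCIP_NOMEMORY;
 *    SCIP_CALL( SCIPexprintHessianSparsityDense(exprint, tree, x, sparsity) );
 *    BMSfreeMemoryArray(&sparsity);
 */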
2616 
2617 /** computes value and dense hessian of an expression tree
2618  *
2619  * The full hessian is computed (lower left and upper right triangle).
2620  */
2621 SCIP_RETCODE SCIPexprintHessianDense(
2622  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2623  SCIP_EXPRTREE* tree, /**< expression tree */
2624  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2625  SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
2626  SCIP_Real* val, /**< buffer to store function value */
2627  SCIP_Real* hessian /**< buffer to store hessian values, needs to have size at least n*n */
2628  )
2629 {
2630  assert(exprint != NULL);
2631  assert(tree != NULL);
2632  assert(varvals != NULL || new_varvals == FALSE);
2633  assert(val != NULL);
2634  assert(hessian != NULL);
2635 
2636  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2637  assert(data != NULL);
2638 
2639  if( new_varvals )
2640  {
2641  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2642  }
2643  else
2644  *val = data->val;
2645 
2646  int n = SCIPexprtreeGetNVars(tree);
2647 
2648  if( n == 0 )
2649  return SCIP_OKAY;
2650 
2651 #if 1
2652  /* this one uses reverse mode */
2653  vector<double> hess(data->f.Hessian(data->x, 0));
2654 
2655  int nn = n*n;
2656  for (int i = 0; i < nn; ++i)
2657  hessian[i] = hess[i];
2658 
2659 #else
2660  /* this one uses forward mode */
2661  for( int i = 0; i < n; ++i )
2662  for( int j = 0; j < n; ++j )
2663  {
2664  vector<int> ii(1,i);
2665  vector<int> jj(1,j);
2666  hessian[i*n+j] = data->f.ForTwo(data->x, ii, jj)[0];
2667  }
2668 #endif
2669 
2670 /* disable debug output since we have no message handler here
2671 #ifdef SCIP_DEBUG
2672  SCIPdebugMessage("HessianDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2673  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2674  SCIPdebugMessage("hess ="); for (int i = 0; i < n*n; ++i) printf("\t %g", hessian[i]); printf("\n");
2675 #endif
2676 */
2677  return SCIP_OKAY;
2678 }
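/* Usage sketch (illustrative; tree is assumed compiled for two variables): the full dense
 * Hessian (both triangles) is written into a buffer of size n*n, with hessian[i*n+j] holding
 * the second derivative with respect to variables i and j.
 *
 *    SCIP_Real x[2] = { 0.5, 2.0 };
 *    SCIP_Real val;
 *    SCIP_Real hessian[4];
 *    SCIP_CALL( SCIPexprintHessianDense(exprint, tree, x, TRUE, &val, hessian) );
 */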