exprinterpret_cppad.cpp
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and library */
4 /* SCIP --- Solving Constraint Integer Programs */
5 /* */
6 /* Copyright (C) 2002-2018 Konrad-Zuse-Zentrum */
7 /* fuer Informationstechnik Berlin */
8 /* */
9 /* SCIP is distributed under the terms of the ZIB Academic License. */
10 /* */
11 /* You should have received a copy of the ZIB Academic License */
12 /* along with SCIP; see the file COPYING. If not email to scip@zib.de. */
13 /* */
14 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
15 
16 /**@file exprinterpret_cppad.cpp
17  * @brief methods to interpret (evaluate) an expression tree "fast" using CppAD
18  * @ingroup EXPRINTS
19  * @author Stefan Vigerske
20  */
21 
22 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
23 
24 #include "scip/def.h"
25 #include "blockmemshell/memory.h"
26 #include "nlpi/pub_expr.h"
27 #include "nlpi/exprinterpret.h"
28 
29 #include <cmath>
30 #include <vector>
31 using std::vector;
32 
33 /* Turn off lint warning "747: Significant prototype coercion" and "732: Loss of sign".
34  * The first warning is generated for expressions like t[0], where t is a vector, since 0 is an integer constant, but a
35  * size_t is expected (usually long unsigned). The second is generated for expressions like t[n], where n is an
36  * integer. Both code pieces are likely to be correct. It seems to be impossible to inhibit these messages for
37  * vector<*>::operator[] only. */
38 /*lint --e{747,732}*/
39 
40 /* Turn off lint info "1702 operator '...' is both an ordinary function 'CppAD::operator...' and a member function 'CppAD::SCIPInterval::operator...'".
41  * However, the functions have different signatures (the CppAD one working on double, the SCIPInterval member
42  * function working on SCIPInterval's).
43  */
44 /*lint --e{1702}*/
45 
46 /* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
47  * via CppAD's user-atomic function feature
48  * our customized implementation should give better results (tighter intervals) for the interval data type
49  */
50 /* #define NO_CPPAD_USER_ATOMIC */
51 
52 /** sign of a value (-1 or +1)
53  *
54  * 0.0 has sign +1
55  */
56 #define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
57 
58 /* in order to use intervals as operands in CppAD,
59  * we need to include the intervalarithext.h very early and require the interval operations to be in the CppAD namespace */
60 #define SCIPInterval_NAMESPACE CppAD
61 #include "nlpi/intervalarithext.h"
62 
63 namespace CppAD
64 {
65 SCIP_Real SCIPInterval::infinity = SCIP_DEFAULT_INFINITY;
66 }
67 using CppAD::SCIPInterval;
68 
69 /* CppAD needs to know a fixed upper bound on the number of threads at compile time.
70  * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult to debug errors.
71  */
72 #ifndef CPPAD_MAX_NUM_THREADS
73 #ifndef NPARASCIP
74 #define CPPAD_MAX_NUM_THREADS 64
75 #else
76 #define CPPAD_MAX_NUM_THREADS 1
77 #endif
78 #endif
79 
80 #include <cppad/cppad.hpp>
81 #include <cppad/utility/error_handler.hpp>
82 
83 /* CppAD is not thread-safe by itself, but uses some static data structures.
84  * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
85  * This allocator needs to know the number of threads and a thread number for each thread.
86  * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
87  */
88 #ifndef NPARASCIP
89 #include <pthread.h>
90 
91 /** mutex for locking in pthread case */
92 static pthread_mutex_t cppadmutex = PTHREAD_MUTEX_INITIALIZER;
93 
94 /** key for accessing thread specific information */
95 static pthread_key_t thread_specific_key;
96 
97 /** currently registered number of threads */
98 static size_t ncurthreads = 0;
99 
100 /** CppAD callback function that indicates whether we are running in parallel mode */
101 static
102 bool in_parallel(void)
103 {
104  return ncurthreads > 1;
105 }
106 
107 /** CppAD callback function that returns the number of the current thread
108  *
109  * assigns a new number to the thread if new
110  */
111 static
112 size_t thread_num(void)
113 {
114  size_t threadnum;
115  void* specific;
116 
117  specific = pthread_getspecific(thread_specific_key);
118 
119  /* if no data for this thread yet, then assign a new thread number to the current thread
120  * we store the thread number incremented by one, to distinguish the absence of data (=0) from existing data
121  */
122  if( specific == NULL )
123  {
124  pthread_mutex_lock(&cppadmutex);
125 
126  SCIPdebugMessage("Assigning thread number %lu to thread %p.\n", (long unsigned int)ncurthreads, (void*)pthread_self());
127 
128  pthread_setspecific(thread_specific_key, (void*)(ncurthreads + 1));
129 
130  threadnum = ncurthreads;
131 
132  ++ncurthreads;
133 
134  pthread_mutex_unlock(&cppadmutex);
135 
136  assert(pthread_getspecific(thread_specific_key) != NULL);
137  assert((size_t)pthread_getspecific(thread_specific_key) == threadnum + 1);
138  }
139  else
140  {
141  threadnum = (size_t)(specific) - 1;
142  }
143 
144  assert(threadnum < ncurthreads);
145 
146  return threadnum;
147 }
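/* Illustrative walk-through: the first thread that calls thread_num() finds no thread-specific
 * data, stores (void*)1 under thread_specific_key, and is assigned thread number 0; a second
 * thread then stores (void*)2 and gets number 1. On later calls each thread simply decodes its
 * stored value minus one.
 */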
148 
149 /** sets up CppAD's data structures for running in multithreading mode
150  *
151  * It must be called once before multithreading is started.
152  */
153 static
154 char init_parallel(void)
155 {
156  pthread_key_create(&thread_specific_key, NULL);
157 
158  CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
159  CppAD::parallel_ad<double>();
160  CppAD::parallel_ad<SCIPInterval>();
161 
162  return 0;
163 }
164 
165 /** a dummy variable that is initialized to the result of init_parallel
166  *
167  * The purpose is to make sure that init_parallel() is called before any multithreading is started.
168  */
169 #if !defined(_MSC_VER)
170 __attribute__ ((unused))
171 #endif
172 static char init_parallel_return = init_parallel();
173 
174 #endif // NPARASCIP
175 
176 /** definition of CondExpOp for SCIPInterval (required by CppAD) */
177 inline
178 SCIPInterval CondExpOp(
179  enum CppAD::CompareOp cop,
180  const SCIPInterval& left,
181  const SCIPInterval& right,
182  const SCIPInterval& trueCase,
183  const SCIPInterval& falseCase)
184 { /*lint --e{715}*/
185  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
186  "SCIPInterval CondExpOp(...)",
187  "Error: cannot use CondExp with an interval type"
188  );
189 
190  return SCIPInterval();
191 }
192 
193 /** another function required by CppAD */
194 inline
195 bool IdenticalPar(
196  const SCIPInterval& x /**< operand */
197  )
198 { /*lint --e{715}*/
199  return true;
200 }
201 
202 /** returns whether the interval equals [0,0] */
203 inline
204 bool IdenticalZero(
205  const SCIPInterval& x /**< operand */
206  )
207 {
208  return (x == 0.0);
209 }
210 
211 /** returns whether the interval equals [1,1] */
212 inline
213 bool IdenticalOne(
214  const SCIPInterval& x /**< operand */
215  )
216 {
217  return (x == 1.0);
218 }
219 
220 /** yet another function that checks whether two intervals are equal */
221 inline
222 bool IdenticalEqualPar(
223  const SCIPInterval& x, /**< first operand */
224  const SCIPInterval& y /**< second operand */
225  )
226 {
227  return (x == y);
228 }
229 
230 /** greater than zero not defined for intervals */
231 inline
232 bool GreaterThanZero(
233  const SCIPInterval& x /**< operand */
234  )
235 { /*lint --e{715}*/
236  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
237  "GreaterThanZero(x)",
238  "Error: cannot use GreaterThanZero with interval"
239  );
240 
241  return false;
242 }
243 
244 /** greater than or equal zero not defined for intervals */
245 inline
246 bool GreaterThanOrZero(
247  const SCIPInterval& x /**< operand */
248  )
249 { /*lint --e{715}*/
250  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__ ,
251  "GreaterThanOrZero(x)",
252  "Error: cannot use GreaterThanOrZero with interval"
253  );
254 
255  return false;
256 }
257 
258 /** less than not defined for intervals */
259 inline
260 bool LessThanZero(
261  const SCIPInterval& x /**< operand */
262  )
263 { /*lint --e{715}*/
264  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
265  "LessThanZero(x)",
266  "Error: cannot use LessThanZero with interval"
267  );
268 
269  return false;
270 }
271 
272 /** less than or equal not defined for intervals */
273 inline
274 bool LessThanOrZero(
275  const SCIPInterval& x /**< operand */
276  )
277 { /*lint --e{715}*/
278  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
279  "LessThanOrZero(x)",
280  "Error: cannot use LessThanOrZero with interval"
281  );
282 
283  return false;
284 }
285 
286 /** conversion to integers not defined for intervals */
287 inline
288 int Integer(
289  const SCIPInterval& x /**< operand */
290  )
291 { /*lint --e{715}*/
292  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
293  "Integer(x)",
294  "Error: cannot use Integer with interval"
295  );
296 
297  return 0;
298 }
299 
300 /** absolute zero multiplication
301  *
302  * @return [0,0] if first argument is [0,0] independent of whether the second argument is an empty interval or not
303  */
304 inline
305 SCIPInterval azmul(
306  const SCIPInterval& x, /**< first operand */
307  const SCIPInterval& y /**< second operand */
308  )
309 {
310  if( x.inf == 0.0 && x.sup == 0.0 )
311  return SCIPInterval(0.0, 0.0);
312  return x * y;
313 }
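/* Example for the special case above: azmul(SCIPInterval(0.0, 0.0), y) is [0,0] even if y is the
 * empty interval or unbounded, while the plain product x * y may propagate the emptiness or
 * unboundedness of y.
 */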
314 
315 /** printing of an interval (required by CppAD) */
316 inline
317 std::ostream& operator<<(std::ostream& out, const SCIP_INTERVAL& x)
318 {
319  out << '[' << x.inf << ',' << x.sup << ']';
320  return out;
321 }
322 
323 using CppAD::AD;
324 
325 /** expression interpreter */
326 struct SCIP_ExprInt
327 {
328  BMS_BLKMEM* blkmem; /**< block memory data structure */
329 };
330 
331 /** expression specific interpreter data */
332 struct SCIP_ExprIntData
333 {
334 public:
335  /** constructor */
336  SCIP_ExprIntData()
337  : val(0.0), need_retape(true), int_need_retape(true), need_retape_always(false), userevalcapability(SCIP_EXPRINTCAPABILITY_ALL), blkmem(NULL), root(NULL)
338  { }
339 
340  /** destructor */
341  ~SCIP_ExprIntData()
342  { }/*lint --e{1540}*/
343 
344  vector< AD<double> > X; /**< vector of dependent variables */
345  vector< AD<double> > Y; /**< result vector */
346  CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
347 
348  vector<double> x; /**< current values of dependent variables */
349  double val; /**< current function value */
350  bool need_retape; /**< will retaping be required for the next point evaluation? */
351 
352  vector< AD<SCIPInterval> > int_X; /**< interval vector of dependent variables */
353  vector< AD<SCIPInterval> > int_Y; /**< interval result vector */
354  CppAD::ADFun<SCIPInterval> int_f; /**< the function to evaluate on intervals as CppAD object */
355 
356  vector<SCIPInterval> int_x; /**< current interval values of dependent variables */
357  SCIPInterval int_val; /**< current interval function value */
358  bool int_need_retape; /**< will retaping be required for the next interval evaluation? */
359 
360  bool need_retape_always; /**< will retaping be always required? */
361  SCIP_EXPRINTCAPABILITY userevalcapability; /**< (intersection of) capabilities of evaluation routines of user expressions */
362 
363  BMS_BLKMEM* blkmem; /**< block memory used to allocate expression tree */
364  SCIP_EXPR* root; /**< copy of expression tree; @todo we should not need to make a copy */
365 };
366 
367 #ifndef NO_CPPAD_USER_ATOMIC
368 
369 /** computes sparsity of jacobian for a univariate function during a forward sweep
370  *
371  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
372  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
373  */
374 static
375 bool univariate_for_sparse_jac(
376  size_t q, /**< number of columns in R */
377  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
378  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
379  )
380 {
381  assert(r.size() == q);
382  assert(s.size() == q);
383 
384  s = r;
385 
386  return true;
387 }
388 
389 /** Computes sparsity of jacobian during a reverse sweep
390  *
391  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
392  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
393  */
394 static
395 bool univariate_rev_sparse_jac(
396  size_t q, /**< number of rows in R */
397  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
398  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
399  )
400 {
401  assert(r.size() == q);
402  assert(s.size() == q);
403 
404  s = r;
405 
406  return true;
407 }
408 
409 /** computes sparsity of hessian during a reverse sweep
410  *
411  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
412  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
413  */
414 static
415 bool univariate_rev_sparse_hes(
416  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
417  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
418  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
419  size_t q, /**< number of columns in R, U, and V */
420  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
421  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
422  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
423  )
424 { /*lint --e{439,715}*/ /* @todo take vx into account */
425  assert(r.size() == q);
426  assert(s.size() == 1);
427  assert(t.size() == 1);
428  assert(u.size() == q);
429  assert(v.size() == q);
430 
431  // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
432  t[0] = s[0];
433 
434  // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
435  // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
436  v = u;
437  if( s[0] )
438  for( size_t j = 0; j < q; ++j )
439  if( r[j] )
440  v[j] = true;
441 
442  return true;
443 }
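/* Example: for q = 3 and r = (true, false, true), both univariate_for_sparse_jac and
 * univariate_rev_sparse_jac return s = (true, false, true); since the univariate f'(x) is
 * treated as dense, the pattern of R is simply copied.
 */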
444 
445 
446 /** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
447  *
448  * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
449  * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetic.
450  *
451  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
452  */
453 template<class Type>
454 class atomic_posintpower : public CppAD::atomic_base<Type>
455 {
456 public:
457  atomic_posintpower()
458  : CppAD::atomic_base<Type>("posintpower"),
459  exponent(0)
460  {
461  /* indicate that we want to use bool-based sparsity pattern */
462  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
463  }
464 
465 private:
466  /** exponent value for next call to forward or reverse */
467  int exponent;
468 
469  /** stores exponent value corresponding to next call to forward or reverse
470  *
471  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
472  */
473  virtual void set_id(size_t id)
474  {
475  exponent = (int) id;
476  }
477 
478  /** forward sweep of positive integer power
479  *
480  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
481  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
482  * in the taylor expansion of f(x) = x^p.
483  * Thus, y = x^p
484  * = tx[0]^p,
485  * y' = p * x^(p-1) * x'
486  * = p * tx[0]^(p-1) * tx[1],
487  * y'' = 1/2 * p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
488  * = 1/2 * p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
489  */
490  bool forward(
491  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
492  size_t p, /**< highest order Taylor coefficient that we are evaluating */
493  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
494  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
495  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
496  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
497  )
498  {
499  assert(exponent > 1);
500  assert(tx.size() >= p+1);
501  assert(ty.size() >= p+1);
502  assert(q <= p);
503 
504  if( vx.size() > 0 )
505  {
506  assert(vx.size() == 1);
507  assert(vy.size() == 1);
508  assert(p == 0);
509 
510  vy[0] = vx[0];
511  }
512 
513  if( q == 0 /* q <= 0 && 0 <= p */ )
514  {
515  ty[0] = CppAD::pow(tx[0], exponent);
516  }
517 
518  if( q <= 1 && 1 <= p )
519  {
520  ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
521  ty[1] *= double(exponent);
522  }
523 
524  if( q <= 2 && 2 <= p )
525  {
526  if( exponent > 2 )
527  {
528  // ty[2] = 1/2 * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
529  ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
530  ty[2] *= (exponent-1) / 2.0;
531  ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
532  ty[2] *= exponent;
533  }
534  else
535  {
536  assert(exponent == 2);
537  // ty[2] = 1/2 * exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
538  ty[2] = tx[1] * tx[1] + 2.0 * tx[0] * tx[2];
539  }
540  }
541 
542  /* higher order derivatives not implemented */
543  if( p > 2 )
544  return false;
545 
546  return true;
547  }
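 /* Example: for exponent = 3 and Taylor input tx = (2, 1, 0.5), i.e., x = 2, x' = 1, x'' = 0.5,
  * the forward sweep above computes
  *   ty[0] = 2^3 = 8,
  *   ty[1] = 3 * 2^2 * 1 = 12,
  *   ty[2] = 1/2 * 3 * 2 * 2 * 1^2 + 3 * 2^2 * 0.5 = 6 + 6 = 12.
  */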
548 
549  /** reverse sweep of positive integer power
550  *
551  * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
552  * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
553  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
554  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
555  * That is, we have to compute
556  *\f$
557  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
558  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
559  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
560  * \f$
561  *
562  * For k = 0, this means
563  *\f$
564  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
565  * = py[0] * (\partial x^p / \partial x)
566  * = py[0] * p * tx[0]^(p-1)
567  *\f$
568  *
569  * For k = 1, this means
570  * \f$
571  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
572  * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
573  * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
574  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
575  * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
576  * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
577  * \f$
578  */
579  bool reverse(
580  size_t p, /**< highest order Taylor coefficient that we are evaluating */
581  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
582  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
583  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
584  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
585  )
586  { /*lint --e{715}*/
587  assert(exponent > 1);
588  assert(px.size() >= p+1);
589  assert(py.size() >= p+1);
590  assert(tx.size() >= p+1);
591 
592  switch( p )
593  {
594  case 0:
595  // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
596  px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
597  px[0] *= exponent;
598  break;
599 
600  case 1:
601  // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
602  px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
603  px[0] *= exponent-1;
604  px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
605  px[0] *= exponent;
606  // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
607  px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
608  px[1] *= exponent;
609  break;
610 
611  default:
612  return false;
613  }
614 
615  return true;
616  }
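 /* Example: for exponent = 3, highest order p = 0, tx[0] = 2, and py[0] = 1, the reverse sweep
  * above returns px[0] = 1 * 3 * 2^2 = 12, which is py[0] times the derivative of x^3 at x = 2.
  */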
617 
618  using CppAD::atomic_base<Type>::for_sparse_jac;
619 
620  /** computes sparsity of jacobian during a forward sweep
621  *
622  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
623  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
624  */
625  bool for_sparse_jac(
626  size_t q, /**< number of columns in R */
627  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
628  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
629  )
630  {
631  return univariate_for_sparse_jac(q, r, s);
632  }
633 
634  using CppAD::atomic_base<Type>::rev_sparse_jac;
635 
636  /** computes sparsity of jacobian during a reverse sweep
637  *
638  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
639  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
640  */
641  bool rev_sparse_jac(
642  size_t q, /**< number of rows in R */
643  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
644  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
645  )
646  {
647  return univariate_rev_sparse_jac(q, r, s);
648  }
649 
650  using CppAD::atomic_base<Type>::rev_sparse_hes;
651 
652  /** computes sparsity of hessian during a reverse sweep
653  *
654  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
655  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
656  */
657  bool rev_sparse_hes(
658  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
659  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
660  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
661  size_t q, /**< number of columns in R, U, and V */
662  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
663  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
664  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
665  )
666  {
667  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
668  }
669 };
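/* Example of why the atomic pow pays off for the interval type: for x = [-1,2], evaluating the
 * square as pow(x, 2) gives the exact enclosure [0,4], while the recursive product x * x under
 * plain interval multiplication yields the overestimate [-2,4].
 */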
670 
671 /** power function with natural exponents */
672 template<class Type>
673 static
674 void posintpower(
675  const vector<Type>& in, /**< vector which first argument is base */
676  vector<Type>& out, /**< vector where to store result in first argument */
677  size_t exponent /**< exponent */
678  )
679 {
680  static atomic_posintpower<Type> pip;
681  pip(in, out, exponent);
682 }
683 
684 #else
685 
686 /** power function with natural exponents */
687 template<class Type>
688 void posintpower(
689  const vector<Type>& in, /**< vector which first argument is base */
690  vector<Type>& out, /**< vector where to store result in first argument */
691  size_t exponent /**< exponent */
692  )
693 {
694  out[0] = pow(in[0], (int)exponent);
695 }
696 
697 #endif
698 
699 
700 #ifndef NO_CPPAD_USER_ATOMIC
701 
702 /** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
703  *
704  * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
705  * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows us to provide
706  * a continuously differentiable function.
707  *
708  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
709  */
710 template<class Type>
711 class atomic_signpower : public CppAD::atomic_base<Type>
712 {
713 public:
714  atomic_signpower()
715  : CppAD::atomic_base<Type>("signpower"),
716  exponent(0.0)
717  {
718  /* indicate that we want to use bool-based sparsity pattern */
719  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
720  }
721 
722 private:
723  /** exponent for use in next call to forward or reverse */
724  SCIP_Real exponent;
725 
726  /** stores exponent corresponding to next call to forward or reverse
727  *
728  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
729  */
730  virtual void set_id(size_t id)
731  {
732  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
733  }
734 
735  /** forward sweep of signpower
736  *
737  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
738  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
739  * in the taylor expansion of f(x) = sign(x)abs(x)^p.
740  * Thus, y = sign(x)abs(x)^p
741  * = sign(tx[0])abs(tx[0])^p,
742  * y' = p * abs(x)^(p-1) * x'
743  * = p * abs(tx[0])^(p-1) * tx[1],
744  * y'' = 1/2 * p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
745  * = 1/2 * p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
746  */
747  bool forward(
748  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
749  size_t p, /**< highest order Taylor coefficient that we are evaluating */
750  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
751  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
752  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
753  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
754  )
755  {
756  assert(exponent > 0.0);
757  assert(tx.size() >= p+1);
758  assert(ty.size() >= p+1);
759  assert(q <= p);
760 
761  if( vx.size() > 0 )
762  {
763  assert(vx.size() == 1);
764  assert(vy.size() == 1);
765  assert(p == 0);
766 
767  vy[0] = vx[0];
768  }
769 
770  if( q == 0 /* q <= 0 && 0 <= p */ )
771  {
772  ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
773  }
774 
775  if( q <= 1 && 1 <= p )
776  {
777  ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
778  ty[1] *= exponent;
779  }
780 
781  if( q <= 2 && 2 <= p )
782  {
783  if( exponent != 2.0 )
784  {
785  ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
786  ty[2] *= (exponent - 1.0) / 2.0;
787  ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
788  ty[2] *= exponent;
789  }
790  else
791  {
792  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
793  ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
794  ty[2] += 2.0 * REALABS(tx[0]) * tx[2];
795  }
796  }
797 
798  /* higher order derivatives not implemented */
799  if( p > 2 )
800  return false;
801 
802  return true;
803  }
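 /* Example: for exponent = 2 and Taylor input tx = (-3, 1, 0), i.e., f(x) = sign(x) * x^2
  * expanded at x = -3 with x' = 1 and x'' = 0, the code above gives
  *   ty[0] = sign(-3) * 3^2 = -9,
  *   ty[1] = 2 * |-3| * 1 = 6,
  *   ty[2] = sign(-3) * 1^2 + 2 * |-3| * 0 = -1.
  */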
804 
805  /** reverse sweep of signpower
806  *
807  * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
808  * y(x) = [ f(x), f'(x), f''(x), ... ].
809  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
810  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
811  * That is, we have to compute
812  *\f$
813  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
814  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
815  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
816  *\f$
817  *
818  * For k = 0, this means
819  *\f$
820  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
821  * = py[0] * (\partial f(x) / \partial x)
822  * = py[0] * p * abs(tx[0])^(p-1)
823  * \f$
824  *
825  * For k = 1, this means
826  *\f$
827  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
828  * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
829  * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
830  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
831  * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
832  * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
833  * \f$
834  */
835  bool reverse(
836  size_t p, /**< highest order Taylor coefficient that we are evaluating */
837  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
838  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
839  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
840  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
841  )
842  { /*lint --e{715}*/
843  assert(exponent > 1);
844  assert(px.size() >= p+1);
845  assert(py.size() >= p+1);
846  assert(tx.size() >= p+1);
847 
848  switch( p )
849  {
850  case 0:
851  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
852  px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
853  px[0] *= exponent;
854  break;
855 
856  case 1:
857  if( exponent != 2.0 )
858  {
859  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
860  px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
861  px[0] *= exponent - 1.0;
862  px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
863  px[0] *= exponent;
864  // px[1] = py[1] * p * abs(tx[0])^(p-1)
865  px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
866  px[1] *= exponent;
867  }
868  else
869  {
870  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
871  px[0] = py[1] * tx[1] * SIGN(tx[0]);
872  px[0] += py[0] * REALABS(tx[0]);
873  px[0] *= 2.0;
874  // px[1] = py[1] * 2.0 * abs(tx[0])
875  px[1] = py[1] * REALABS(tx[0]);
876  px[1] *= 2.0;
877  }
878  break;
879 
880  default:
881  return false;
882  }
883 
884  return true;
885  }
886 
887  using CppAD::atomic_base<Type>::for_sparse_jac;
888 
889  /** computes sparsity of jacobian during a forward sweep
890  *
891  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
892  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
893  */
894  bool for_sparse_jac(
895  size_t q, /**< number of columns in R */
896  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
897  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
898  )
899  {
900  return univariate_for_sparse_jac(q, r, s);
901  }
902 
903  using CppAD::atomic_base<Type>::rev_sparse_jac;
904 
905  /** computes sparsity of jacobian during a reverse sweep
906  *
907  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
908  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
909  */
910  bool rev_sparse_jac(
911  size_t q, /**< number of rows in R */
912  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
913  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
914  )
915  {
916  return univariate_rev_sparse_jac(q, r, s);
917  }
918 
919  using CppAD::atomic_base<Type>::rev_sparse_hes;
920 
921  /** computes sparsity of hessian during a reverse sweep
922  *
923  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
924  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
925  */
926  bool rev_sparse_hes(
927  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
928  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
929  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
930  size_t q, /**< number of columns in S and R */
931  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
932  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
933  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
934  )
935  {
936  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
937  }
938 
939 };
940 
941 /** Specialization of atomic_signpower template for intervals */
942 template<>
943 class atomic_signpower<SCIPInterval> : public CppAD::atomic_base<SCIPInterval>
944 {
945 public:
946  atomic_signpower()
947  : CppAD::atomic_base<SCIPInterval>("signpowerint"),
948  exponent(0.0)
949  {
950  /* indicate that we want to use bool-based sparsity pattern */
951  this->option(CppAD::atomic_base<SCIPInterval>::bool_sparsity_enum);
952  }
953 
954 private:
955  /** exponent for use in next call to forward or reverse */
956  SCIP_Real exponent;
957 
958  /** stores exponent corresponding to next call to forward or reverse
959  *
960  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
961  */
962  virtual void set_id(size_t id)
963  {
964  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
965  }
966 
967  /** specialization of atomic_signpower::forward template for SCIPinterval
968  *
969  * @todo try to compute tighter resultants
970  */
971  bool forward(
972  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
973  size_t p, /**< highest order Taylor coefficient that we are evaluating */
974  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
975  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
976  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
977  CppAD::vector<SCIPInterval>& ty /**< vector to store taylor coefficients of y */
978  )
979  {
980  assert(exponent > 0.0);
981  assert(tx.size() >= p+1);
982  assert(ty.size() >= p+1);
983  assert(q <= p);
984 
985  if( vx.size() > 0 )
986  {
987  assert(vx.size() == 1);
988  assert(vy.size() == 1);
989  assert(p == 0);
990 
991  vy[0] = vx[0];
992  }
993 
994  if( q == 0 /* q <= 0 && 0 <= p */ )
995  {
996  ty[0] = CppAD::signpow(tx[0], exponent);
997  }
998 
999  if( q <= 1 && 1 <= p )
1000  {
1001  ty[1] = CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[1];
1002  ty[1] *= exponent;
1003  }
1004 
1005  if( q <= 2 && 2 <= p )
1006  {
1007  if( exponent != 2.0 )
1008  {
1009  ty[2] = CppAD::signpow(tx[0], exponent - 2.0) * CppAD::square(tx[1]);
1010  ty[2] *= (exponent - 1.0) / 2.0;
1011  ty[2] += CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[2];
1012  ty[2] *= exponent;
1013  }
1014  else
1015  {
1016  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
1017  ty[2] = CppAD::sign(tx[0]) * CppAD::square(tx[1]);
1018  ty[2] += 2.0 * CppAD::abs(tx[0]) * tx[2];
1019  }
1020  }
1021 
1022  /* higher order derivatives not implemented */
1023  if( p > 2 )
1024  return false;
1025 
1026  return true;
1027  }
1028 
1029  /** specialization of atomic_signpower::reverse template for SCIPinterval
1030  *
1031  * @todo try to compute tighter resultants
1032  */
1033  bool reverse(
1034  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1035  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
1036  const CppAD::vector<SCIPInterval>& ty, /**< values for taylor coefficients of y */
1037  CppAD::vector<SCIPInterval>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1038  const CppAD::vector<SCIPInterval>& py /**< values for partial derivatives of g(x) w.r.t. y */
1039  )
1040  { /*lint --e{715} */
1041  assert(exponent > 1);
1042  assert(px.size() >= p+1);
1043  assert(py.size() >= p+1);
1044  assert(tx.size() >= p+1);
1045 
1046  switch( p )
1047  {
1048  case 0:
1049  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
1050  px[0] = py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1051  px[0] *= exponent;
1052  break;
1053 
1054  case 1:
1055  if( exponent != 2.0 )
1056  {
1057  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
1058  px[0] = py[1] * tx[1] * CppAD::signpow(tx[0], exponent - 2.0);
1059  px[0] *= exponent - 1.0;
1060  px[0] += py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1061  px[0] *= exponent;
1062  // px[1] = py[1] * p * abs(tx[0])^(p-1)
1063  px[1] = py[1] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1064  px[1] *= exponent;
1065  }
1066  else
1067  {
1068  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
1069  px[0] = py[1] * tx[1] * CppAD::sign(tx[0]);
1070  px[0] += py[0] * CppAD::abs(tx[0]);
1071  px[0] *= 2.0;
1072  // px[1] = py[1] * 2.0 * abs(tx[0])
1073  px[1] = py[1] * CppAD::abs(tx[0]);
1074  px[1] *= 2.0;
1075  }
1076  break;
1077 
1078  default:
1079  return false;
1080  }
1081 
1082  return true;
1083  }
1084 
1085  using CppAD::atomic_base<SCIPInterval>::for_sparse_jac;
1086 
1087  /** computes sparsity of jacobian during a forward sweep
1088  *
1089  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1090  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1091  */
1092  bool for_sparse_jac(
1093  size_t q, /**< number of columns in R */
1094  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1095  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1096  )
1097  {
1098  return univariate_for_sparse_jac(q, r, s);
1099  }
1100 
1101  using CppAD::atomic_base<SCIPInterval>::rev_sparse_jac;
1102 
1103  /** computes sparsity of jacobian during a reverse sweep
1104  *
1105  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
1106  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1107  */
1108  bool rev_sparse_jac(
1109  size_t q, /**< number of rows in R */
1110  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
1111  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
1112  )
1113  {
1114  return univariate_rev_sparse_jac(q, r, s);
1115  }
1116 
1117  using CppAD::atomic_base<SCIPInterval>::rev_sparse_hes;
1118 
1119  /** computes sparsity of hessian during a reverse sweep
1120  *
1121  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
1122  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1123  */
1124  bool rev_sparse_hes(
1125  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1126  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1127  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1128  size_t q, /**< number of columns in S and R */
1129  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1130  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1131  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1132  )
1133  {
1134  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
1135  }
1136 };
1137 
1138 /** template for evaluation for signpower operator */
1139 template<class Type>
1140 static
1141 void evalSignPower(
1142  Type& resultant, /**< resultant */
1143  const Type& arg, /**< operand */
1144  SCIP_EXPR* expr /**< expression that holds the exponent */
1145  )
1146 {
1147  vector<Type> in(1, arg);
1148  vector<Type> out(1);
1149 
1150  static atomic_signpower<Type> sp;
1151  sp(in, out, (size_t)(void*)expr);
1152 
1153  resultant = out[0];
1154  return;
1155 }
1156 
1157 #else
1158 
1159 /** template for evaluation for signpower operator
1160  *
1161  * Only implemented for real numbers, thus gives error by default.
1162  */
1163 template<class Type>
1164 static
1165 void evalSignPower(
1166  Type& resultant, /**< resultant */
1167  const Type& arg, /**< operand */
1168  SCIP_EXPR* expr /**< expression that holds the exponent */
1169  )
1170 { /*lint --e{715}*/
1171  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1172  "evalSignPower()",
1173  "Error: SignPower not implemented for this value type"
1174  );
1175 }
1176 
1177 /** specialization of signpower evaluation for real numbers */
1178 template<>
1179 void evalSignPower(
1180  CppAD::AD<double>& resultant, /**< resultant */
1181  const CppAD::AD<double>& arg, /**< operand */
1182  SCIP_EXPR* expr /**< expression that holds the exponent */
1183  )
1184 {
1185  SCIP_Real exponent;
1186 
1187  exponent = SCIPexprGetSignPowerExponent(expr);
1188 
1189  if( arg == 0.0 )
1190  resultant = 0.0;
1191  else if( arg > 0.0 )
1192  resultant = pow( arg, exponent);
1193  else
1194  resultant = -pow(-arg, exponent);
1195 }
1196 
1197 #endif
1198 
1199 
1200 #ifndef NO_CPPAD_USER_ATOMIC
1201 
1202 template<class Type>
1203 SCIP_RETCODE exprEvalUser(
1204  SCIP_EXPR* expr,
1205  Type* x,
1206  Type& funcval,
1207  Type* gradient,
1208  Type* hessian
1209  )
1210 {
1211  return SCIPexprEvalUser(expr, x, &funcval, gradient, hessian); /*lint !e429*/
1212 }
1213 
1214 template<>
1215 SCIP_RETCODE exprEvalUser(
1216  SCIP_EXPR* expr,
1217  SCIPInterval* x,
1218  SCIPInterval& funcval,
1219  SCIPInterval* gradient,
1220  SCIPInterval* hessian
1221  )
1222 {
1223  return SCIPexprEvalIntUser(expr, SCIPInterval::infinity, x, &funcval, gradient, hessian);
1224 }
1225 
1226 /** Automatic differentiation of user expression as CppAD user-atomic function.
1227  *
1228  * This class implements forward and reverse operations for a function given by a user expression for use within CppAD.
1229  */
1230 template<class Type>
1231 class atomic_userexpr : public CppAD::atomic_base<Type>
1232 {
1233 public:
1234  atomic_userexpr()
1235  : CppAD::atomic_base<Type>("userexpr"),
1236  expr(NULL)
1237  {
1238  /* indicate that we want to use bool-based sparsity pattern */
1239  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
1240  }
1241 
1242 private:
1243  /** user expression */
1244  SCIP_EXPR* expr;
1245 
1246  /** stores user expression corresponding to next call to forward or reverse
1247  *
1248  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
1249  */
1250  virtual void set_id(size_t id)
1251  {
1252  expr = (SCIP_EXPR*)(void*)id;
1253  assert(SCIPexprGetOperator(expr) == SCIP_EXPR_USER);
1254  }
1255 
1256  /** forward sweep of userexpr
1257  *
1258  * We follow http://www.coin-or.org/CppAD/Doc/atomic_forward.xml
1259  * Note that p and q are interchanged!
1260  *
1261  * For a scalar variable t, let
1262  * Y(t) = f(X(t))
1263  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1264  * where for x^i the i is an index, while for t^i the i is an exponent.
1265  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1266  *
1267  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1268  * y^0 = Y^(0)(0) = Y(0) = f(X(0)) = f(x^0)
1269  * y^1 = Y^(1)(0) = Y'(0) = f'(X(0)) * X'(0) = f'(x^0) * x^1
1270  * y^2 = 1/2 Y^(2)(0) = 1/2 Y''(0) = 1/2 X'(0) * f''(X(0)) X'(0) + 1/2 * f'(X(0)) * X''(0) = 1/2 x^1 * f''(x^0) * x^1 + f'(x^0) * x^2
1271  *
1272  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]), we get
1273  * ty[0] = y^0 = f(x^0) = f(tx[{1..n}*(p+1)])
1274  * ty[1] = y^1 = f'(x^0) * tx[{1..n}*(p+1)+1] = sum(i=1..n, grad[i] * tx[i*(p+1)+1]), where grad = f'(x^0)
1275  * ty[2] = 1/2 sum(i,j=1..n, x[i*(p+1)+1] * x[j*(p+1)+1] * hessian[i,j]) + sum(i=1..n, grad[i] * x[i*(p+1)+2])
1276  */
1277  bool forward(
1278  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
1279  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1280  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1281  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
1282  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1283  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
1284  )
1285  {
1286  assert(expr != NULL);
1287  assert(ty.size() == p+1);
1288  assert(q <= p);
1289 
1290  size_t n = tx.size() / (p+1);
1291  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1292  assert(n >= 1);
1293 
1294  if( vx.size() > 0 )
1295  {
1296  assert(vx.size() == n);
1297  assert(vy.size() == 1);
1298  assert(p == 0);
1299 
1300  /* y_0 is a variable if at least one of the x_i is a variable */
1301  vy[0] = false;
1302  for( size_t i = 0; i < n; ++i )
1303  if( vx[i] )
1304  {
1305  vy[0] = true;
1306  break;
1307  }
1308  }
1309 
1310  Type* x = new Type[n];
1311  Type* gradient = NULL;
1312  Type* hessian = NULL;
1313 
1314  if( q <= 2 && 1 <= p )
1315  gradient = new Type[n];
1316  if( q <= 2 && 2 <= p )
1317  hessian = new Type[n*n];
1318 
1319  for( size_t i = 0; i < n; ++i )
1320  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1321 
1322  if( exprEvalUser(expr, x, ty[0], gradient, hessian) != SCIP_OKAY )
1323  {
1324  delete[] x;
1325  delete[] gradient;
1326  delete[] hessian;
1327  return false;
1328  }
1329 
1330  if( gradient != NULL )
1331  {
1332  ty[1] = 0.0;
1333  for( size_t i = 0; i < n; ++i )
1334  ty[1] += gradient[i] * tx[i * (p+1) + 1];
1335  }
1336 
1337  if( hessian != NULL )
1338  {
1339  assert(gradient != NULL);
1340 
1341  ty[2] = 0.0;
1342  for( size_t i = 0; i < n; ++i )
1343  {
1344  for( size_t j = 0; j < n; ++j )
1345  ty[2] += 0.5 * hessian[i*n+j] * tx[i * (p+1) + 1] * tx[j * (p+1) + 1];
1346 
1347  ty[2] += gradient[i] * tx[i * (p+1) + 2];
1348  }
1349  }
1350 
1351  delete[] x;
1352  delete[] gradient;
1353  delete[] hessian;
1354 
1355  /* higher order derivatives not implemented */
1356  if( p > 2 )
1357  return false;
1358 
1359  return true;
1360  }
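 /* Worked example for a hypothetical user expression f(x1,x2) = x1*x2 (so n = 2):
  * for x^0 = (2,3), x^1 = (1,2), x^2 = (0,0) we have grad = (3,2) and hessian = ((0,1),(1,0)),
  * and the sweep above yields
  *   ty[0] = 2*3 = 6,
  *   ty[1] = 3*1 + 2*2 = 7,
  *   ty[2] = 1/2 * (1,2) * hessian * (1,2)^T + grad * x^2 = 2 + 0 = 2.
  */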
1361 
1362  /** reverse sweep of userexpr
1363  *
1364  * We follow http://www.coin-or.org/CppAD/Doc/atomic_reverse.xml
1365  * Note that there q is our p.
1366  *
1367  * For a scalar variable t, let
1368  * Y(t) = f(X(t))
1369  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1370  * where for x^i the i is an index, while for t^i the i is an exponent.
1371  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1372  *
1373  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1374  * Y(t) = y^0 + y^1 t^1 + y^2 t^2 + ...
1375  * y^0, y^1, ... are the taylor coefficients of f(x).
1376  *
1377  * Further, let F(x^0,..,x^p) be given as F^k(x) = y^k. Thus,
1378  * F^0(x) = y^0 = Y^(0)(0) = f(x^0)
1379  * F^1(x) = y^1 = Y^(1)(0) = f'(x^0) * x^1
1380  * F^2(x) = y^2 = 1/2 Y''(0) = 1/2 x^1 f''(x^0) x^1 + f'(x^0) x^2
1381  *
1382  * Given functions G: R^(p+1) -> R and H: R^(n*(p+1)) -> R, where H(x^0, x^1, .., x^p) = G(F(x^0,..,x^p)),
1383  * we have to return the value of \f$\partial H / \partial x^l, l = 0..p,\f$ in px. Therefore,
1384  * \f$
1385  * px^l = \partial H / \partial x^l
1386  * = sum(k=0..p, (\partial G / \partial y^k) * (\partial y^k / \partial x^l)
1387  * = sum(k=0..p, py[k] * (\partial F^k / \partial x^l)
1388  * \f$
1389  *
1390  * For p = 0, this means
1391  * \f$
1392  * px^0 = py[0] * \partial F^0 / \partial x^0
1393  * = py[0] * \partial f(x^0) / \partial x^0
1394  * = py[0] * f'(x^0)
1395  * \f$
1396  *
1397  * For p = 1, this means (for l = 0):
1398  * \f[
1399  * px^0 = py[0] * \partial F^0 / \partial x^0 + py[1] * \partial F^1 / \partial x^0
1400  * = py[0] * \partial f(x^0) / \partial x^0 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1401  * = py[0] * f'(x^0) + py[1] * f''(x^0) * x^1
1402  * \f]
1403  * and (for l=1):
1404  * \f[
1405  * px^1 = py[0] * \partial F^0 / \partial x^1 + py[1] * \partial F^1 / \partial x^1
1406  * = py[0] * \partial f(x^0) / \partial x^1 + py[1] * \partial (f'(x^0) * x^1) / \partial x^1
1407  * = py[0] * 0 + py[1] * f'(x^0)
1408  * \f]
1409  *
1410  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k]) and
1411  * px^k = (px[k], px[(p+1)+k], px[2*(p+1)+k], ..., px[n*(p+1)+k]), we get
1412  * for p = 0:
1413  * px[i] = (px^0)_i = py[0] * grad[i]
1414  * for p = 1:
1415  * px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1416  * px[i*2+1] = (px^1)_i = py[1] * grad[i]
1417  */
1418  bool reverse(
1419  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1420  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
1421  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
1422  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1423  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
1424  )
1425  {
1426  assert(expr != NULL);
1427  assert(px.size() == tx.size());
1428  assert(py.size() == p+1);
1429 
1430  size_t n = tx.size() / (p+1);
1431  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1432  assert(n >= 1);
1433 
1434  Type* x = new Type[n];
1435  Type funcval;
1436  Type* gradient = new Type[n];
1437  Type* hessian = NULL;
1438 
1439  if( p == 1 )
1440  hessian = new Type[n*n];
1441 
1442  for( size_t i = 0; i < n; ++i )
1443  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1444 
1445  if( exprEvalUser(expr, x, funcval, gradient, hessian) != SCIP_OKAY )
1446  {
1447  delete[] x;
1448  delete[] gradient;
1449  delete[] hessian;
1450  return false;
1451  }
1452 
1453  switch( p )
1454  {
1455  case 0:
1456  // px[j] = (px^0)_j = py[0] * grad[j]
1457  for( size_t i = 0; i < n; ++i )
1458  px[i] = py[0] * gradient[i];
1459  break;
1460 
1461  case 1:
1462  // px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1463  // px[i*2+1] = (px^1)_i = py[1] * grad[i]
1464  assert(hessian != NULL);
1465  for( size_t i = 0; i < n; ++i )
1466  {
1467  px[i*2+0] = py[0] * gradient[i]; /*lint !e835*/
1468  for( size_t j = 0; j < n; ++j )
1469  px[i*2+0] += py[1] * hessian[i+n*j] * tx[j*2+1]; /*lint !e835*/
1470 
1471  px[i*2+1] = py[1] * gradient[i];
1472  }
1473  break;
1474 
1475  default:
1476  return false;
1477  }
1478 
1479  return true;
1480  } /*lint !e715*/
1481 
1482  using CppAD::atomic_base<Type>::for_sparse_jac;
1483 
1484  /** computes sparsity of jacobian during a forward sweep
1485  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1486  * Since we assume f'(x) to be dense, the sparsity of S will be the sparsity of R.
1487  */
1488  bool for_sparse_jac(
1489  size_t q, /**< number of columns in R */
1490  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1491  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1492  )
1493  {
1494  assert(expr != NULL);
1495  assert(s.size() == q);
1496 
1497  size_t n = r.size() / q;
1498  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1499 
1500  // sparsity for S(x) = f'(x) * R
1501  for( size_t j = 0; j < q; j++ )
1502  {
1503  s[j] = false;
1504  for( size_t i = 0; i < n; i++ )
1505  s[j] |= (bool)r[i * q + j]; /*lint !e1786*/
1506  }
1507 
1508  return true;
1509  }
1510 
1511  using CppAD::atomic_base<Type>::rev_sparse_jac;
1512 
1513  /** computes sparsity of jacobian during a reverse sweep
1514  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1515  * Since we assume f'(x) to be dense, the sparsity of R will be the sparsity of S.
1516  */
1517  bool rev_sparse_jac(
1518  size_t q, /**< number of rows in R */
1519  const CppAD::vector<bool>& rt, /**< sparsity of R, rowwise */
1520  CppAD::vector<bool>& st /**< vector to store sparsity of S, rowwise */
1521  )
1522  {
1523  assert(expr != NULL);
1524  assert(rt.size() == q);
1525 
1526  size_t n = st.size() / q;
1527  assert(n == (size_t)SCIPexprGetNChildren(expr));
1528 
1529  // sparsity for S(x)^T = f'(x)^T * R^T
1530  for( size_t j = 0; j < q; j++ )
1531  for( size_t i = 0; i < n; i++ )
1532  st[i * q + j] = rt[j];
1533 
1534  return true;
1535  }
1536 
1537  using CppAD::atomic_base<Type>::rev_sparse_hes;
1538 
1539  /** computes sparsity of hessian during a reverse sweep
1540  * Assume V(x) = (g(f(x)))'' R for a function g:R->R and a matrix R.
1541  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1542  */
1543  bool rev_sparse_hes(
1544  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1545  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1546  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1547  size_t q, /**< number of columns in S and R */
1548  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1549  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1550  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1551  )
1552  {
1553  assert(expr != NULL);
1554  size_t n = vx.size();
1555  assert((size_t)SCIPexprGetNChildren(expr) == n);
1556  assert(s.size() == 1);
1557  assert(t.size() == n);
1558  assert(r.size() == n * q);
1559  assert(u.size() == q);
1560  assert(v.size() == n * q);
1561 
1562  size_t i, j, k;
1563 
1564  // sparsity for T(x) = S(x) * f'(x)
1565  for( i = 0; i < n; ++i )
1566  t[i] = s[0];
1567 
1568  // V(x) = f'(x)^T * g''(y) * f'(x) * R + g'(y) * f''(x) * R
1569  // U(x) = g''(y) * f'(x) * R
1570  // S(x) = g'(y)
1571 
1572  // back propagate the sparsity for U
1573  for( j = 0; j < q; j++ )
1574  for( i = 0; i < n; i++ )
1575  v[ i * q + j] = u[j];
1576 
1577  // include forward Jacobian sparsity in Hessian sparsity
1578  // sparsity for g'(y) * f''(x) * R (Note f''(x) is assumed to be dense)
1579  if( s[0] )
1580  for( j = 0; j < q; j++ )
1581  for( i = 0; i < n; i++ )
1582  for( k = 0; k < n; ++k )
1583  v[ i * q + j] |= (bool) r[ k * q + j];
1584 
1585  return true;
1586  }
1587 
1588 };
1589 
1590 template<class Type>
1591 static
1592 void evalUser(
1593  Type& resultant, /**< resultant */
1594  const Type* args, /**< operands */
1595  SCIP_EXPR* expr /**< expression that holds the user expression */
1596  )
1597 {
1598  assert( args != 0 );
1599  vector<Type> in(args, args + SCIPexprGetNChildren(expr));
1600  vector<Type> out(1);
1601 
1602  static atomic_userexpr<Type> u;
1603  u(in, out, (size_t)(void*)expr);
1604 
1605  resultant = out[0];
1606  return;
1607 }
1608 
1609 #else
1610 
1611 template<class Type>
1612 static
1613 void evalUser(
1614  Type& resultant, /**< resultant */
1615  const Type* args, /**< operands */
1616  SCIP_EXPR* expr /**< expression that holds the user expression */
1617  )
1618 {
1619  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1620  "evalUser()",
1621  "Error: user expressions in CppAD not possible without CppAD user atomic facility"
1622  );
1623 }
1624 
1625 #endif
1626 
1627 /** template for evaluation for minimum operator
1628  *
1629  * Only implemented for real numbers, thus gives error by default.
1630  * @todo implement own userad function
1631  */
1632 template<class Type>
1633 static
1634 void evalMin(
1635  Type& resultant, /**< resultant */
1636  const Type& arg1, /**< first operand */
1637  const Type& arg2 /**< second operand */
1638  )
1639 { /*lint --e{715,1764}*/
1640  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1641  "evalMin()",
1642  "Error: Min not implemented for this value type"
1643  );
1644 }
1645 
1646 /** specialization of minimum evaluation for real numbers */
1647 template<>
1648 void evalMin(
1649  CppAD::AD<double>& resultant, /**< resultant */
1650  const CppAD::AD<double>& arg1, /**< first operand */
1651  const CppAD::AD<double>& arg2 /**< second operand */
1652  )
1653 {
1654  resultant = MIN(arg1, arg2);
1655 }
1656 
1657 /** template for evaluation for maximum operator
1658  *
1659  * Only implemented for real numbers, thus gives error by default.
1660  * @todo implement own userad function
1661  */
1662 template<class Type>
1663 static
1664 void evalMax(
1665  Type& resultant, /**< resultant */
1666  const Type& arg1, /**< first operand */
1667  const Type& arg2 /**< second operand */
1668  )
1669 { /*lint --e{715,1764}*/
1670  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1671  "evalMax()",
1672  "Error: Max not implemented for this value type"
1673  );
1674 }
1675 
1676 /** specialization of maximum evaluation for real numbers */
1677 template<>
1678 void evalMax(
1679  CppAD::AD<double>& resultant, /**< resultant */
1680  const CppAD::AD<double>& arg1, /**< first operand */
1681  const CppAD::AD<double>& arg2 /**< second operand */
1682  )
1683 {
1684  resultant = MAX(arg1, arg2);
1685 }
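
/* Illustrative sketch (not part of this source file): MIN and MAX compare AD values while the
 * tape is being recorded, so the recorded branch is only valid as long as the comparison
 * outcome does not change; analyzeTree() below therefore forces retaping for trees containing
 * these operators. A tape-safe alternative would be CppAD's conditional expressions, sketched
 * here with a hypothetical helper condMin(); relies on the <cppad/cppad.hpp> include at the
 * top of this file.
 */
static CppAD::AD<double> condMin(
   const CppAD::AD<double>& a,                  /**< first operand */
   const CppAD::AD<double>& b                   /**< second operand */
   )
{
   /* CondExpLt records the comparison itself, so the result stays correct for all later argument values */
   return CppAD::CondExpLt(a, b, a, b);
}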
1686 
1687 /** template for evaluation of the square-root operator
1688  *
1689  * Default is to use the standard sqrt-function.
1690  */
1691 template<class Type>
1692 static
1693 void evalSqrt(
1694  Type& resultant, /**< resultant */
1695  const Type& arg /**< operand */
1696  )
1697 {
1698  resultant = sqrt(arg);
1699 }
1700 
1701 /** template for evaluation of the absolute value operator */
1702 template<class Type>
1703 static
1704 void evalAbs(
1705  Type& resultant, /**< resultant */
1706  const Type& arg /**< operand */
1707  )
1708 {
1709  resultant = abs(arg);
1710 }
1711 
1712 /** specialization of absolute value evaluation for intervals
1713  *
1714  * Use sqrt(x^2) for now; @todo implement own userad function.
1715  */
1716 template<>
1717 void evalAbs(
1718  CppAD::AD<SCIPInterval>& resultant, /**< resultant */
1719  const CppAD::AD<SCIPInterval>& arg /**< operand */
1720  )
1721 {
1722  vector<CppAD::AD<SCIPInterval> > in(1, arg);
1723  vector<CppAD::AD<SCIPInterval> > out(1);
1724 
1725  posintpower(in, out, 2);
1726 
1727  resultant = sqrt(out[0]);
1728 }
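
/* Illustrative sketch (not part of this source file): the detour through posintpower pays off
 * because an even power evaluated as a single operation yields a tighter interval than the
 * plain product.  For X = [-2,3], X*X gives [-6,9], while X^2 as an even power gives [0,9],
 * and sqrt([0,9]) = [0,3] = abs(X).  A minimal plain-double sketch with a hypothetical
 * interval struct Ival (uses std::sqrt from <cmath>, included at the top of this file):
 */
struct Ival { double lo, hi; };

static Ival evenSquare(Ival x)
{
   double a = x.lo * x.lo;
   double b = x.hi * x.hi;
   Ival r;
   r.lo = (x.lo <= 0.0 && x.hi >= 0.0) ? 0.0 : (a < b ? a : b);   /* 0 is attained if the interval contains 0 */
   r.hi = (a > b ? a : b);
   return r;
}

static Ival absViaSqrt(Ival x)
{
   Ival sq = evenSquare(x);                     /* [-2,3] -> [0,9] */
   Ival r;
   r.lo = std::sqrt(sq.lo);
   r.hi = std::sqrt(sq.hi);
   return r;                                    /* [-2,3] -> [0,3] = abs([-2,3]) */
}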
1729 
1730 /** integer power operation for arbitrary integer exponents */
1731 template<class Type>
1732 static
1733 void evalIntPower(
1734  Type& resultant, /**< resultant */
1735  const Type& arg, /**< operand */
1736  const int exponent /**< exponent */
1737  )
1738 {
1739  if( exponent > 1 )
1740  {
1741  vector<Type> in(1, arg);
1742  vector<Type> out(1);
1743 
1744  posintpower(in, out, exponent);
1745 
1746  resultant = out[0];
1747  return;
1748  }
1749 
1750  if( exponent < -1 )
1751  {
1752  vector<Type> in(1, arg);
1753  vector<Type> out(1);
1754 
1755  posintpower(in, out, -exponent);
1756 
1757  resultant = Type(1.0)/out[0];
1758  return;
1759  }
1760 
1761  if( exponent == 1 )
1762  {
1763  resultant = arg;
1764  return;
1765  }
1766 
1767  if( exponent == 0 )
1768  {
1769  resultant = Type(1.0);
1770  return;
1771  }
1772 
1773  assert(exponent == -1);
1774  resultant = Type(1.0)/arg;
1775 }
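
/* Illustrative sketch (not part of this source file): evalIntPower() reduces every integer
 * exponent to a positive power (delegated to the posintpower atomic operation, which gives
 * tighter results for the interval type) plus at most one division.  A plain-double version
 * of the same case split, with posintpower replaced by square-and-multiply; intPowerSketch
 * is a made-up name.
 */
static double intPowerSketch(
   double x,                                    /**< operand */
   int exponent                                 /**< integer exponent */
   )
{
   if( exponent == 0 )
      return 1.0;
   if( exponent == 1 )
      return x;
   if( exponent == -1 )
      return 1.0 / x;

   int e = exponent > 0 ? exponent : -exponent;
   double base = x;
   double result = 1.0;
   while( e > 0 )
   {
      if( e & 1 )
         result *= base;                        /* multiply in the current bit of the exponent */
      base *= base;                             /* square for the next bit */
      e >>= 1;
   }
   return exponent > 0 ? result : 1.0 / result;
}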
1776 
1777 /** CppAD compatible evaluation of an expression for given arguments and parameters */
1778 template<class Type>
1779 static
1780 SCIP_RETCODE eval(
1781  SCIP_EXPR* expr, /**< expression */
1782  const vector<Type>& x, /**< values of variables */
1783  SCIP_Real* param, /**< values of parameters */
1784  Type& val /**< buffer to store expression value */
1785  )
1786 {
1787  Type* buf = 0;
1788 
1789  assert(expr != NULL);
1790 
1791  /* todo use SCIP_MAXCHILD_ESTIMATE as in expression.c */
1792 
1793  if( SCIPexprGetNChildren(expr) )
1794  {
1795  if( BMSallocMemoryArray(&buf, SCIPexprGetNChildren(expr)) == NULL ) /*lint !e666*/
1796  return SCIP_NOMEMORY;
1797 
1798  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1799  {
1800  SCIP_CALL( eval(SCIPexprGetChildren(expr)[i], x, param, buf[i]) );
1801  }
1802  }
1803 
1804  switch(SCIPexprGetOperator(expr))
1805  {
1806  case SCIP_EXPR_VARIDX:
1807  assert(SCIPexprGetOpIndex(expr) < (int)x.size());
1808  val = x[SCIPexprGetOpIndex(expr)];
1809  break;
1810 
1811  case SCIP_EXPR_CONST:
1812  val = SCIPexprGetOpReal(expr);
1813  break;
1814 
1815  case SCIP_EXPR_PARAM:
1816  assert(param != NULL);
1817  val = param[SCIPexprGetOpIndex(expr)];
1818  break;
1819 
1820  case SCIP_EXPR_PLUS:
1821  assert( buf != 0 );
1822  val = buf[0] + buf[1];
1823  break;
1824 
1825  case SCIP_EXPR_MINUS:
1826  assert( buf != 0 );
1827  val = buf[0] - buf[1];
1828  break;
1829 
1830  case SCIP_EXPR_MUL:
1831  assert( buf != 0 );
1832  val = buf[0] * buf[1];
1833  break;
1834 
1835  case SCIP_EXPR_DIV:
1836  assert( buf != 0 );
1837  val = buf[0] / buf[1];
1838  break;
1839 
1840  case SCIP_EXPR_SQUARE:
1841  assert( buf != 0 );
1842  evalIntPower(val, buf[0], 2);
1843  break;
1844 
1845  case SCIP_EXPR_SQRT:
1846  assert( buf != 0 );
1847  evalSqrt(val, buf[0]);
1848  break;
1849 
1850  case SCIP_EXPR_REALPOWER:
1851  assert( buf != 0 );
1852  val = CppAD::pow(buf[0], SCIPexprGetRealPowerExponent(expr));
1853  break;
1854 
1855  case SCIP_EXPR_INTPOWER:
1856  assert( buf != 0 );
1857  evalIntPower(val, buf[0], SCIPexprGetIntPowerExponent(expr));
1858  break;
1859 
1860  case SCIP_EXPR_SIGNPOWER:
1861  assert( buf != 0 );
1862  evalSignPower(val, buf[0], expr);
1863  break;
1864 
1865  case SCIP_EXPR_EXP:
1866  assert( buf != 0 );
1867  val = exp(buf[0]);
1868  break;
1869 
1870  case SCIP_EXPR_LOG:
1871  assert( buf != 0 );
1872  val = log(buf[0]);
1873  break;
1874 
1875  case SCIP_EXPR_SIN:
1876  assert( buf != 0 );
1877  val = sin(buf[0]);
1878  break;
1879 
1880  case SCIP_EXPR_COS:
1881  assert( buf != 0 );
1882  val = cos(buf[0]);
1883  break;
1884 
1885  case SCIP_EXPR_TAN:
1886  assert( buf != 0 );
1887  val = tan(buf[0]);
1888  break;
1889 #ifdef SCIP_DISABLED_CODE /* these operators are currently disabled */
1890  case SCIP_EXPR_ERF:
1891  assert( buf != 0 );
1892  val = erf(buf[0]);
1893  break;
1894 
1895  case SCIP_EXPR_ERFI:
1896  return SCIP_ERROR;
1897 #endif
1898  case SCIP_EXPR_MIN:
1899  assert( buf != 0 );
1900  evalMin(val, buf[0], buf[1]);
1901  break;
1902 
1903  case SCIP_EXPR_MAX:
1904  assert( buf != 0 );
1905  evalMax(val, buf[0], buf[1]);
1906  break;
1907 
1908  case SCIP_EXPR_ABS:
1909  assert( buf != 0 );
1910  evalAbs(val, buf[0]);
1911  break;
1912 
1913  case SCIP_EXPR_SIGN:
1914  assert( buf != 0 );
1915  val = sign(buf[0]);
1916  break;
1917 
1918  case SCIP_EXPR_SUM:
1919  assert( buf != 0 );
1920  val = 0.0;
1921  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1922  val += buf[i];
1923  break;
1924 
1925  case SCIP_EXPR_PRODUCT:
1926  assert( buf != 0 );
1927  val = 1.0;
1928  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1929  val *= buf[i];
1930  break;
1931 
1932  case SCIP_EXPR_LINEAR:
1933  {
1934  SCIP_Real* coefs;
1935 
1936  coefs = SCIPexprGetLinearCoefs(expr);
1937  assert(coefs != NULL || SCIPexprGetNChildren(expr) == 0);
1938 
1939  assert( buf != 0 );
1940  val = SCIPexprGetLinearConstant(expr);
1941  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1942  val += coefs[i] * buf[i]; /*lint !e613*/
1943  break;
1944  }
1945 
1946  case SCIP_EXPR_QUADRATIC:
1947  {
1948  SCIP_Real* lincoefs;
1949  SCIP_QUADELEM* quadelems;
1950  int nquadelems;
1951  SCIP_Real sqrcoef;
1952  Type lincoef;
1953  vector<Type> in(1);
1954  vector<Type> out(1);
1955 
1956  assert( buf != 0 );
1957 
1958  lincoefs = SCIPexprGetQuadLinearCoefs(expr);
1959  nquadelems = SCIPexprGetNQuadElements(expr);
1960  quadelems = SCIPexprGetQuadElements(expr);
1961  assert(quadelems != NULL || nquadelems == 0);
1962 
1963  SCIPexprSortQuadElems(expr);
1964 
1965  val = SCIPexprGetQuadConstant(expr);
1966 
1967  /* for each argument, we collect its linear coefficient from lincoefs, its square coefficient, and all factors from bilinear terms,
1968  * then we compute the term sqrcoef*x^2 + lincoef*x and add it to the result */
1969  int i = 0;
1970  for( int argidx = 0; argidx < SCIPexprGetNChildren(expr); ++argidx )
1971  {
1972  if( i == nquadelems || quadelems[i].idx1 > argidx ) /*lint !e613*/
1973  {
1974  /* there are no quadratic terms with argidx as their first index; only the linear term needs to be added */
1975  if( lincoefs != NULL )
1976  val += lincoefs[argidx] * buf[argidx];
1977  continue;
1978  }
1979 
1980  sqrcoef = 0.0;
1981  lincoef = lincoefs != NULL ? lincoefs[argidx] : 0.0;
1982 
1983  assert(i < nquadelems && quadelems[i].idx1 == argidx); /*lint !e613*/
1984  do
1985  {
1986  if( quadelems[i].idx2 == argidx ) /*lint !e613*/
1987  sqrcoef += quadelems[i].coef; /*lint !e613*/
1988  else
1989  lincoef += quadelems[i].coef * buf[quadelems[i].idx2]; /*lint !e613*/
1990  ++i;
1991  } while( i < nquadelems && quadelems[i].idx1 == argidx ); /*lint !e613*/
1992  assert(i == nquadelems || quadelems[i].idx1 > argidx); /*lint !e613*/
1993 
1994  /* this is not as good as what we can get from SCIPintervalQuad, but easy to implement */
1995  if( sqrcoef != 0.0 )
1996  {
1997  in[0] = buf[argidx];
1998  posintpower(in, out, 2);
1999  val += sqrcoef * out[0];
2000  }
2001 
2002  val += lincoef * buf[argidx];
2003  }
2004  assert(i == nquadelems);
2005 
2006  break;
2007  }
2008 
2009  case SCIP_EXPR_POLYNOMIAL:
2010  {
2011  SCIP_EXPRDATA_MONOMIAL** monomials;
2012  Type childval;
2013  Type monomialval;
2014  SCIP_Real exponent;
2015  int nmonomials;
2016  int nfactors;
2017  int* childidxs;
2018  SCIP_Real* exponents;
2019  int i;
2020  int j;
2021 
2022  assert( buf != 0 );
2023 
2024  val = SCIPexprGetPolynomialConstant(expr);
2025 
2026  nmonomials = SCIPexprGetNMonomials(expr);
2027  monomials = SCIPexprGetMonomials(expr);
2028 
2029  for( i = 0; i < nmonomials; ++i )
2030  {
2031  nfactors = SCIPexprGetMonomialNFactors(monomials[i]);
2032  childidxs = SCIPexprGetMonomialChildIndices(monomials[i]);
2033  exponents = SCIPexprGetMonomialExponents(monomials[i]);
2034  monomialval = SCIPexprGetMonomialCoef(monomials[i]);
2035 
2036  for( j = 0; j < nfactors; ++j )
2037  {
2038  assert(childidxs[j] >= 0);
2039  assert(childidxs[j] < SCIPexprGetNChildren(expr));
2040 
2041  childval = buf[childidxs[j]];
2042  exponent = exponents[j];
2043 
2044  /* cover some special exponents separately to avoid calling expensive pow function */
2045  if( exponent == 0.0 )
2046  continue;
2047  if( exponent == 1.0 )
2048  {
2049  monomialval *= childval;
2050  continue;
2051  }
2052  if( (int)exponent == exponent )
2053  {
2054  Type tmp;
2055  evalIntPower(tmp, childval, (int)exponent);
2056  monomialval *= tmp;
2057  continue;
2058  }
2059  if( exponent == 0.5 )
2060  {
2061  Type tmp;
2062  evalSqrt(tmp, childval);
2063  monomialval *= tmp;
2064  continue;
2065  }
2066  monomialval *= pow(childval, exponent);
2067  }
2068 
2069  val += monomialval;
2070  }
2071 
2072  break;
2073  }
2074 
2075  case SCIP_EXPR_USER:
2076  evalUser(val, buf, expr);
2077  break;
2078 
2079  case SCIP_EXPR_LAST:
2080  default:
2081  BMSfreeMemoryArrayNull(&buf);
2082  return SCIP_ERROR;
2083  }
2084 
2085  BMSfreeMemoryArrayNull(&buf);
2086 
2087  return SCIP_OKAY;
2088 }
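
/* Illustrative sketch (not part of this source file): the SCIP_EXPR_QUADRATIC case above
 * groups the sorted quadratic elements by their first index and adds sqrcoef*x^2 + lincoef*x
 * per argument.  For q(x0,x1) = 2 + 3*x0 + 1.5*x0^2 + 2*x0*x1 at (x0,x1) = (1,2), the pass
 * for x0 collects sqrcoef = 1.5 and lincoef = 3 + 2*x1 = 7, so q(1,2) = 2 + 1.5 + 7 = 10.5.
 * A minimal plain-double check of that grouping; quadSketch is a made-up name.
 */
static double quadSketch(
   double x0,
   double x1
   )
{
   double val = 2.0;                            /* constant term */
   double sqrcoef = 1.5;                        /* coefficient of x0^2 */
   double lincoef = 3.0 + 2.0 * x1;             /* linear coefficient of x0 plus the bilinear contribution 2*x1 */

   val += sqrcoef * x0 * x0 + lincoef * x0;
   return val;                                  /* quadSketch(1.0, 2.0) == 10.5 */
}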
2089 
2090 /** analyzes an expression tree to determine whether it requires retaping on every evaluation
2091  *
2092  * This may be the case if the evaluation sequence depends on values of operands (e.g., in case of abs, sign, signpower, ...).
2093  */
2094 static
2095 void analyzeTree(
2096  SCIP_EXPRINTDATA* data,
2097  SCIP_EXPR* expr
2098  )
2099 {
2100  assert(expr != NULL);
2101  assert(SCIPexprGetChildren(expr) != NULL || SCIPexprGetNChildren(expr) == 0);
2102 
2103  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
2104  analyzeTree(data, SCIPexprGetChildren(expr)[i]);
2105 
2106  switch( SCIPexprGetOperator(expr) )
2107  {
2108  case SCIP_EXPR_MIN:
2109  case SCIP_EXPR_MAX:
2110  case SCIP_EXPR_ABS:
2111 #ifdef NO_CPPAD_USER_ATOMIC
2112  case SCIP_EXPR_SIGNPOWER:
2113 #endif
2114  data->need_retape_always = true;
2115  break;
2116 
2117  case SCIP_EXPR_USER:
2118  data->userevalcapability &= SCIPexprGetUserEvalCapability(expr);
2119  break;
2120 
2121  default: ;
2122  } /*lint !e788*/
2123 
2124 }
2125 
2126 /** replacement for CppAD's default error handler
2127  *
2128  * In debug mode, CppAD gives an error when an evaluation contains a nan.
2129  * We do not want to stop execution in such a case, since the calling routine should check for nan's and decide what to do.
2130  * Since we cannot selectively ignore this particular error, we ignore all errors.
2131  * @todo find a way to check whether the error corresponds to a nan and communicate this back
2132  */
2133 static
2134 void cppaderrorcallback(
2135  bool known, /**< is the error from a known source? */
2136  int line, /**< line where error occurred */
2137  const char* file, /**< file where error occurred */
2138  const char* cond, /**< error condition */
2139  const char* msg /**< error message */
2140  )
2141 {
2142  SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, cond);
2143 }
2144 
2145 /* install our error handler */
2146 static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
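
/* Illustrative sketch (not part of this source file): a CppAD::ErrorHandler object installs
 * its callback for the object's lifetime, so the static instance above redirects all CppAD
 * error reports (including the nan check in debug mode) to cppaderrorcallback().  The same
 * mechanism in isolation; myHandler and triggerSketch are made-up names.
 */
static void myHandler(bool known, int line, const char* file, const char* cond, const char* msg)
{
   /* just report that CppAD signalled something; a real handler could inspect msg further */
   SCIPdebugMessage("CppAD reported at %s:%d: %s (%s), known = %d\n", file, line, msg, cond, known);
}

static void triggerSketch(void)
{
   CppAD::ErrorHandler localhandler(myHandler); /* active until localhandler goes out of scope */
   CppAD::ErrorHandler::Call(true, __LINE__, __FILE__, "x > 0", "example error");
}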
2147 
2148 /** gets name and version of expression interpreter */
2149 const char* SCIPexprintGetName(void)
2150 {
2151  return CPPAD_PACKAGE_STRING;
2152 }
2153 
2154 /** gets descriptive text of expression interpreter */
2155 const char* SCIPexprintGetDesc(void)
2156 {
2157  return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (www.coin-or.org/CppAD)";
2158 }
2159 
2160 /** gets capabilities of expression interpreter (using bitflags) */
2161 SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
2162  void
2163  )
2164 {
2165  return SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_INTFUNCVALUE |
2166  SCIP_EXPRINTCAPABILITY_GRADIENT | SCIP_EXPRINTCAPABILITY_INTGRADIENT |
2167  SCIP_EXPRINTCAPABILITY_HESSIAN;
2168 }
2169 
2170 /** creates an expression interpreter object */
2171 SCIP_RETCODE SCIPexprintCreate(
2172  BMS_BLKMEM* blkmem, /**< block memory data structure */
2173  SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
2174  )
2175 {
2176  assert(blkmem != NULL);
2177  assert(exprint != NULL);
2178 
2179  if( BMSallocMemory(exprint) == NULL )
2180  return SCIP_NOMEMORY;
2181 
2182  (*exprint)->blkmem = blkmem;
2183 
2184  return SCIP_OKAY;
2185 }
2186 
2187 /** frees an expression interpreter object */
2188 SCIP_RETCODE SCIPexprintFree(
2189  SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
2190  )
2191 {
2192  assert( exprint != NULL);
2193  assert(*exprint != NULL);
2194 
2195  BMSfreeMemory(exprint);
2196 
2197  return SCIP_OKAY;
2198 }
2199 
2200 /** compiles an expression tree and stores compiled data in expression tree */
2201 SCIP_RETCODE SCIPexprintCompile(
2202  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2203  SCIP_EXPRTREE* tree /**< expression tree */
2204  )
2205 { /*lint --e{429} */
2206  assert(tree != NULL);
2207 
2208  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2209  if (!data)
2210  {
2211  data = new SCIP_EXPRINTDATA();
2212  assert( data != NULL );
2213  SCIPexprtreeSetInterpreterData(tree, data);
2214  SCIPdebugMessage("set interpreter data in tree %p to %p\n", (void*)tree, (void*)data);
2215  }
2216  else
2217  {
2218  data->need_retape = true;
2219  data->int_need_retape = true;
2220  }
2221 
2222  int n = SCIPexprtreeGetNVars(tree);
2223 
2224  data->X.resize(n);
2225  data->x.resize(n);
2226  data->Y.resize(1);
2227 
2228  data->int_X.resize(n);
2229  data->int_x.resize(n);
2230  data->int_Y.resize(1);
2231 
2232  if( data->root != NULL )
2233  {
2234  SCIPexprFreeDeep(exprint->blkmem, &data->root);
2235  }
2236 
2237  SCIP_EXPR* root = SCIPexprtreeGetRoot(tree);
2238 
2239  SCIP_CALL( SCIPexprCopyDeep(exprint->blkmem, &data->root, root) );
2240 
2241  data->blkmem = exprint->blkmem;
2242 
2243  analyzeTree(data, data->root);
2244 
2245  return SCIP_OKAY;
2246 }
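
/* Illustrative sketch (not part of this source file): typical call sequence for this
 * interpreter, assuming an already constructed expression tree over two variables and a
 * block memory area; usageSketch is a made-up name, all called functions are defined in
 * this file.
 */
static SCIP_RETCODE usageSketch(
   BMS_BLKMEM* blkmem,                          /**< block memory */
   SCIP_EXPRTREE* tree                          /**< expression tree over two variables */
   )
{
   SCIP_EXPRINT* exprint;
   SCIP_Real x[2] = { 1.0, 2.0 };
   SCIP_Real val;
   SCIP_Real grad[2];

   SCIP_CALL( SCIPexprintCreate(blkmem, &exprint) );
   SCIP_CALL( SCIPexprintCompile(exprint, tree) );                     /* tapes are (re)built on the next evaluation when needed */
   SCIP_CALL( SCIPexprintEval(exprint, tree, x, &val) );
   SCIP_CALL( SCIPexprintGrad(exprint, tree, x, FALSE, &val, grad) );  /* FALSE: reuse the point passed to Eval */
   SCIP_CALL( SCIPexprintFree(&exprint) );

   return SCIP_OKAY;
}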
2247 
2248 
2249 /** gives the capability to evaluate an expression by the expression interpreter
2250  *
2251  * In case of user-given expressions, higher-order derivatives may not be available for the user expression,
2252  * even if the expression interpreter could handle these. This method allows one to recognize that, e.g., the
2253  * Hessian for an expression is not available because it contains a user expression that does not provide
2254  * Hessians.
2255  */
2256 SCIP_EXPRINTCAPABILITY SCIPexprintGetExprtreeCapability(
2257  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2258  SCIP_EXPRTREE* tree /**< expression tree */
2259  )
2260 {
2261  assert(tree != NULL);
2262 
2263  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2264  assert(data != NULL);
2265 
2266  return data->userevalcapability;
2267 }/*lint !e715*/
2268 
2269 /** frees interpreter data */
2270 SCIP_RETCODE SCIPexprintFreeData(
2271  SCIP_EXPRINTDATA** interpreterdata /**< interpreter data that should freed */
2272  )
2273 {
2274  assert( interpreterdata != NULL);
2275  assert(*interpreterdata != NULL);
2276 
2277  if( (*interpreterdata)->root != NULL )
2278  SCIPexprFreeDeep((*interpreterdata)->blkmem, &(*interpreterdata)->root);
2279 
2280  delete *interpreterdata;
2281  *interpreterdata = NULL;
2282 
2283  return SCIP_OKAY;
2284 }
2285 
2286 /** notify expression interpreter that a new parameterization is used
2287  *
2288  * This probably causes retaping by AD algorithms.
2289  */
2290 SCIP_RETCODE SCIPexprintNewParametrization(
2291  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2292  SCIP_EXPRTREE* tree /**< expression tree */
2293  )
2294 {
2295  assert(exprint != NULL);
2296  assert(tree != NULL);
2297 
2298  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2299  if( data != NULL )
2300  {
2301  data->need_retape = true;
2302  data->int_need_retape = true;
2303  }
2304 
2305  return SCIP_OKAY;
2306 }
2307 
2308 /** evaluates an expression tree */
2309 SCIP_RETCODE SCIPexprintEval(
2310  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2311  SCIP_EXPRTREE* tree, /**< expression tree */
2312  SCIP_Real* varvals, /**< values of variables */
2313  SCIP_Real* val /**< buffer to store value */
2314  )
2315 {
2316  SCIP_EXPRINTDATA* data;
2317 
2318  assert(exprint != NULL);
2319  assert(tree != NULL);
2320  assert(varvals != NULL);
2321  assert(val != NULL);
2322 
2323  data = SCIPexprtreeGetInterpreterData(tree);
2324  assert(data != NULL);
2325  assert(SCIPexprtreeGetNVars(tree) == (int)data->X.size());
2326  assert(SCIPexprtreeGetRoot(tree) != NULL);
2327 
2328  int n = SCIPexprtreeGetNVars(tree);
2329 
2330  if( n == 0 )
2331  {
2332  SCIP_CALL( SCIPexprtreeEval(tree, NULL, val) );
2333  return SCIP_OKAY;
2334  }
2335 
2336  if( data->need_retape_always || data->need_retape )
2337  {
2338  for( int i = 0; i < n; ++i )
2339  {
2340  data->X[i] = varvals[i];
2341  data->x[i] = varvals[i];
2342  }
2343 
2344  CppAD::Independent(data->X);
2345 
2346  if( data->root != NULL )
2347  SCIP_CALL( eval(data->root, data->X, SCIPexprtreeGetParamVals(tree), data->Y[0]) );
2348  else
2349  data->Y[0] = 0.0;
2350 
2351  data->f.Dependent(data->X, data->Y);
2352 
2353  data->val = Value(data->Y[0]);
2354  SCIPdebugMessage("Eval retaped and computed value %g\n", data->val);
2355 
2356  // the following is required if the gradient shall be computed by a reverse sweep later
2357  // data->val = data->f.Forward(0, data->x)[0];
2358 
2359  data->need_retape = false;
2360  }
2361  else
2362  {
2363  assert((int)data->x.size() >= n);
2364  for( int i = 0; i < n; ++i )
2365  data->x[i] = varvals[i];
2366 
2367  data->val = data->f.Forward(0, data->x)[0]; /*lint !e1793*/
2368  SCIPdebugMessage("Eval used forward sweep to compute value %g\n", data->val);
2369  }
2370 
2371  *val = data->val;
2372 
2373  return SCIP_OKAY;
2374 }
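
/* Illustrative sketch (not part of this source file): the retape branch above follows CppAD's
 * standard record-once, evaluate-often pattern, shown here in isolation for
 * f(x) = x0*x0 + sin(x1); tapingSketch is a made-up name and the calls rely on the
 * <cppad/cppad.hpp> include at the top of this file.
 */
static void tapingSketch(void)
{
   vector<CppAD::AD<double> > X(2);
   X[0] = 1.0;
   X[1] = 2.0;
   CppAD::Independent(X);                       /* start recording, cf. CppAD::Independent(data->X) */

   vector<CppAD::AD<double> > Y(1);
   Y[0] = X[0] * X[0] + sin(X[1]);

   CppAD::ADFun<double> f;
   f.Dependent(X, Y);                           /* stop recording, cf. data->f.Dependent(data->X, data->Y) */

   vector<double> x(2);
   x[0] = 3.0;
   x[1] = 0.5;
   double fx = f.Forward(0, x)[0];              /* reuse the tape at a new point, cf. the else-branch above */
   (void) fx;
}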
2375 
2376 /** evaluates an expression tree on intervals */
2377 SCIP_RETCODE SCIPexprintEvalInt(
2378  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2379  SCIP_EXPRTREE* tree, /**< expression tree */
2380  SCIP_Real infinity, /**< value for infinity */
2381  SCIP_INTERVAL* varvals, /**< interval values of variables */
2382  SCIP_INTERVAL* val /**< buffer to store interval value of expression */
2383  )
2384 {
2385  SCIP_EXPRINTDATA* data;
2386 
2387  assert(exprint != NULL);
2388  assert(tree != NULL);
2389  assert(varvals != NULL);
2390  assert(val != NULL);
2391 
2392  data = SCIPexprtreeGetInterpreterData(tree);
2393  assert(data != NULL);
2394  assert(SCIPexprtreeGetNVars(tree) == (int)data->int_X.size());
2395  assert(SCIPexprtreeGetRoot(tree) != NULL);
2396 
2397  int n = SCIPexprtreeGetNVars(tree);
2398 
2399  if( n == 0 )
2400  {
2401  SCIP_CALL( SCIPexprtreeEvalInt(tree, infinity, NULL, val) );
2402  return SCIP_OKAY;
2403  }
2404 
2405  SCIPInterval::infinity = infinity;
2406 
2407  if( data->int_need_retape || data->need_retape_always )
2408  {
2409  for( int i = 0; i < n; ++i )
2410  {
2411  data->int_X[i] = varvals[i];
2412  data->int_x[i] = varvals[i];
2413  }
2414 
2415  CppAD::Independent(data->int_X);
2416 
2417  if( data->root != NULL )
2418  SCIP_CALL( eval(data->root, data->int_X, SCIPexprtreeGetParamVals(tree), data->int_Y[0]) );
2419  else
2420  data->int_Y[0] = 0.0;
2421 
2422  data->int_f.Dependent(data->int_X, data->int_Y);
2423 
2424  data->int_val = Value(data->int_Y[0]);
2425 
2426  data->int_need_retape = false;
2427  }
2428  else
2429  {
2430  assert((int)data->int_x.size() >= n);
2431  for( int i = 0; i < n; ++i )
2432  data->int_x[i] = varvals[i];
2433 
2434  data->int_val = data->int_f.Forward(0, data->int_x)[0]; /*lint !e1793*/
2435  }
2436 
2437  *val = data->int_val;
2438 
2439  return SCIP_OKAY;
2440 }
2441 
2442 /** computes value and gradient of an expression tree */
2443 SCIP_RETCODE SCIPexprintGrad(
2444  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2445  SCIP_EXPRTREE* tree, /**< expression tree */
2446  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2447  SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
2448  SCIP_Real* val, /**< buffer to store expression value */
2449  SCIP_Real* gradient /**< buffer to store expression gradient, need to have length at least SCIPexprtreeGetNVars(tree) */
2450  )
2451 {
2452  assert(exprint != NULL);
2453  assert(tree != NULL);
2454  assert(varvals != NULL || new_varvals == FALSE);
2455  assert(val != NULL);
2456  assert(gradient != NULL);
2457 
2458  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2459  assert(data != NULL);
2460 
2461  if( new_varvals )
2462  {
2463  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2464  }
2465  else
2466  *val = data->val;
2467 
2468  int n = SCIPexprtreeGetNVars(tree);
2469 
2470  if( n == 0 )
2471  return SCIP_OKAY;
2472 
2473  vector<double> jac(data->f.Jacobian(data->x));
2474 
2475  for( int i = 0; i < n; ++i )
2476  gradient[i] = jac[i];
2477 
2478 /* disable debug output since we have no message handler here
2479 #ifdef SCIP_DEBUG
2480  SCIPdebugMessage("Grad for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2481  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2482  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t %g", gradient[i]); printf("\n");
2483 #endif
2484 */
2485 
2486  return SCIP_OKAY;
2487 }
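
/* Illustrative sketch (not part of this source file): for a scalar-valued ADFun as used here,
 * Jacobian() returns exactly the gradient; a first-order reverse sweep, f.Reverse(1, w) with
 * weight vector w = (1), would yield the same vector.  gradSketch is a made-up name.
 */
static void gradSketch(void)
{
   vector<CppAD::AD<double> > X(2);
   X[0] = 1.0;
   X[1] = 1.0;
   CppAD::Independent(X);

   vector<CppAD::AD<double> > Y(1);
   Y[0] = X[0] * X[1];
   CppAD::ADFun<double> f(X, Y);                /* record f(x) = x0*x1 */

   vector<double> x(2);
   x[0] = 2.0;
   x[1] = 5.0;
   vector<double> grad = f.Jacobian(x);         /* grad = (x1, x0) = (5, 2) */
   (void) grad;
}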
2488 
2489 /** computes interval value and interval gradient of an expression tree */
2490 SCIP_RETCODE SCIPexprintGradInt(
2491  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2492  SCIP_EXPRTREE* tree, /**< expression tree */
2493  SCIP_Real infinity, /**< value for infinity */
2494  SCIP_INTERVAL* varvals, /**< interval values of variables, can be NULL if new_varvals is FALSE */
2495  SCIP_Bool new_varvals, /**< have variable interval values changed since last call to an interval evaluation routine? */
2496  SCIP_INTERVAL* val, /**< buffer to store expression interval value */
2497  SCIP_INTERVAL* gradient /**< buffer to store expression interval gradient, need to have length at least SCIPexprtreeGetNVars(tree) */
2498  )
2499 {
2500  assert(exprint != NULL);
2501  assert(tree != NULL);
2502  assert(varvals != NULL || new_varvals == false);
2503  assert(val != NULL);
2504  assert(gradient != NULL);
2505 
2506  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2507  assert(data != NULL);
2508 
2509  if (new_varvals)
2510  SCIP_CALL( SCIPexprintEvalInt(exprint, tree, infinity, varvals, val) );
2511  else
2512  *val = data->int_val;
2513 
2514  int n = SCIPexprtreeGetNVars(tree);
2515 
2516  if( n == 0 )
2517  return SCIP_OKAY;
2518 
2519  vector<SCIPInterval> jac(data->int_f.Jacobian(data->int_x));
2520 
2521  for (int i = 0; i < n; ++i)
2522  gradient[i] = jac[i];
2523 
2524 /* disable debug output since we have no message handler here
2525 #ifdef SCIP_DEBUG
2526  SCIPdebugMessage("GradInt for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2527  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(data->int_x[i]), SCIPintervalGetSup(data->int_x[i])); printf("\n");
2528  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(gradient[i]), SCIPintervalGetSup(gradient[i])); printf("\n");
2529 #endif
2530 */
2531 
2532  return SCIP_OKAY;
2533 }
2534 
2535 /** gives sparsity pattern of hessian
2536  *
2537  * NOTE: this function might be replaced later by something nicer.
2538  * Since the AD code might need to do a forward sweep, you should pass variable values in here.
2539  */
2540 SCIP_RETCODE SCIPexprintHessianSparsityDense(
2541  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2542  SCIP_EXPRTREE* tree, /**< expression tree */
2543  SCIP_Real* varvals, /**< values of variables */
2544  SCIP_Bool* sparsity /**< buffer to store sparsity pattern of Hessian, sparsity[i+n*j] indicates whether entry (i,j) is nonzero in the hessian */
2545  )
2546 {
2547  assert(exprint != NULL);
2548  assert(tree != NULL);
2549  assert(varvals != NULL);
2550  assert(sparsity != NULL);
2551 
2552  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2553  assert(data != NULL);
2554 
2555  int n = SCIPexprtreeGetNVars(tree);
2556  if( n == 0 )
2557  return SCIP_OKAY;
2558 
2559  int nn = n*n;
2560 
2561  if( data->need_retape_always )
2562  {
2563  // @todo can we do something better here, e.g., by looking at the expression tree by ourself?
2564 
2565  for( int i = 0; i < nn; ++i )
2566  sparsity[i] = TRUE;
2567 
2568 /* disable debug output since we have no message handler here
2569 #ifdef SCIP_DEBUG
2570  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2571  SCIPdebugMessage("sparsity = all elements, due to discontinuities\n");
2572 #endif
2573 */
2574 
2575  return SCIP_OKAY;
2576  }
2577 
2578  if( data->need_retape )
2579  {
2580  SCIP_Real val;
2581  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &val) );
2582  }
2583 
2584  SCIPdebugMessage("calling ForSparseJac\n");
2585 
2586  vector<bool> r(nn, false);
2587  for (int i = 0; i < n; ++i)
2588  r[i*n+i] = true; /*lint !e647 !e1793*/
2589  (void) data->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
2590 
2591  SCIPdebugMessage("calling RevSparseHes\n");
2592 
2593  vector<bool> s(1, true);
2594  vector<bool> sparsehes(data->f.RevSparseHes(n, s));
2595 
2596  for( int i = 0; i < nn; ++i )
2597  sparsity[i] = sparsehes[i];
2598 
2599 /* disable debug output since we have no message handler here
2600 #ifdef SCIP_DEBUG
2601  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2602  SCIPdebugMessage("sparsity ="); for (int i = 0; i < n; ++i) for (int j = 0; j < n; ++j) if (sparsity[i*n+j]) printf(" (%d,%d)", i, j); printf("\n");
2603 #endif
2604 */
2605 
2606  return SCIP_OKAY;
2607 }
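
/* Illustrative sketch (not part of this source file): the ForSparseJac/RevSparseHes pair above
 * is CppAD's standard recipe for Hessian sparsity: seed the forward Jacobian sparsity with the
 * identity, then reverse-propagate the sparsity of the scalar objective.  Shown in isolation
 * for f(x) = x0*x1 + x2, whose Hessian is nonzero only at (0,1) and (1,0); sparsitySketch is a
 * made-up name.
 */
static void sparsitySketch(void)
{
   size_t n = 3;

   vector<CppAD::AD<double> > X(n, CppAD::AD<double>(1.0));
   CppAD::Independent(X);
   vector<CppAD::AD<double> > Y(1);
   Y[0] = X[0] * X[1] + X[2];
   CppAD::ADFun<double> f(X, Y);

   vector<bool> r(n * n, false);                /* identity seed, as above */
   for( size_t i = 0; i < n; ++i )
      r[i * n + i] = true;
   (void) f.ForSparseJac(n, r);

   vector<bool> s(1, true);
   vector<bool> hessparsity = f.RevSparseHes(n, s);   /* hessparsity[i*n+j] is true only for (0,1) and (1,0) */
   (void) hessparsity;
}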
2608 
2609 /** computes value and dense hessian of an expression tree
2610  *
2611  * The full hessian is computed (lower left and upper right triangle).
2612  */
2613 SCIP_RETCODE SCIPexprintHessianDense(
2614  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2615  SCIP_EXPRTREE* tree, /**< expression tree */
2616  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2617  SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
2618  SCIP_Real* val, /**< buffer to store function value */
2619  SCIP_Real* hessian /**< buffer to store hessian values, need to have size at least n*n */
2620  )
2621 {
2622  assert(exprint != NULL);
2623  assert(tree != NULL);
2624  assert(varvals != NULL || new_varvals == FALSE);
2625  assert(val != NULL);
2626  assert(hessian != NULL);
2627 
2628  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2629  assert(data != NULL);
2630 
2631  if( new_varvals )
2632  {
2633  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2634  }
2635  else
2636  *val = data->val;
2637 
2638  int n = SCIPexprtreeGetNVars(tree);
2639 
2640  if( n == 0 )
2641  return SCIP_OKAY;
2642 
2643 #if 1
2644  /* this one uses reverse mode */
2645  vector<double> hess(data->f.Hessian(data->x, 0));
2646 
2647  int nn = n*n;
2648  for (int i = 0; i < nn; ++i)
2649  hessian[i] = hess[i];
2650 
2651 #else
2652  /* this one uses forward mode */
2653  for( int i = 0; i < n; ++i )
2654  for( int j = 0; j < n; ++j )
2655  {
2656  vector<int> ii(1,i);
2657  vector<int> jj(1,j);
2658  hessian[i*n+j] = data->f.ForTwo(data->x, ii, jj)[0];
2659  }
2660 #endif
2661 
2662 /* disable debug output since we have no message handler here
2663 #ifdef SCIP_DEBUG
2664  SCIPdebugMessage("HessianDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2665  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2666  SCIPdebugMessage("hess ="); for (int i = 0; i < n*n; ++i) printf("\t %g", hessian[i]); printf("\n");
2667 #endif
2668 */
2669  return SCIP_OKAY;
2670 }
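
/* Illustrative sketch (not part of this source file): f.Hessian(x, 0) above is CppAD's driver
 * for the dense Hessian of range component 0; the disabled branch computes single entries via
 * ForTwo instead.  Shown in isolation for f(x) = x0*x0*x1, whose Hessian at x = (1,2) is
 * ((4, 2), (2, 0)); hessSketch is a made-up name.
 */
static void hessSketch(void)
{
   vector<CppAD::AD<double> > X(2);
   X[0] = 1.0;
   X[1] = 1.0;
   CppAD::Independent(X);
   vector<CppAD::AD<double> > Y(1);
   Y[0] = X[0] * X[0] * X[1];
   CppAD::ADFun<double> f(X, Y);

   vector<double> x(2);
   x[0] = 1.0;
   x[1] = 2.0;
   vector<double> hess = f.Hessian(x, 0);       /* row-major 2x2: 4, 2, 2, 0 */
   (void) hess;
}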