
Source Code for Module SloppyCell.ReactionNetworks.Dynamics

   1  """ 
   2  Methods for evaluating the dynamics of a Network. 
   3  """ 
   4  __docformat__ = "restructuredtext en" 
   5   
   6  import copy 
   7  import sets 
   8  import sys  
   9  import scipy 
  10  import scipy.optimize 
  11   
  12  import logging 
  13  logger = logging.getLogger('ReactionNetworks.Dynamics') 
  14   
  15  import SloppyCell.daskr as daskr 
  16  daeint = daskr.daeint 
  17  import Trajectory_mod 
  18   
  19  import SloppyCell.Utility as Utility 
  20  from SloppyCell.ReactionNetworks.Components import event_info 
  21  from SloppyCell import HAVE_PYPAR, my_rank, my_host, num_procs 
  22  if HAVE_PYPAR: 
  23      import pypar 
  24   
  25  _double_epsilon_ = scipy.finfo(scipy.float_).eps 
  26  _double_tiny_ = scipy.finfo(scipy.float_).tiny 
  27   
  28  # This specifies the maximum number of steps daeint will take between timepoints 
  29  MAX_STEPS = 1e5 
  30   
  31  # global_rtol is the default relative tolerance to use if none is specified in 
  32  # calls. 
  33  global_rtol = 1e-6 
  34  # global_atol overrides the default of rtol*typical_value. It may be useful if 
  35  # we're having problems with large values going negative. (Although then it 
  36  # might be better to just use logarithmic integration.) 
  37  global_atol = None 
  38  global_hmax = None 
  39   
  40  return_derivs = False # do we want time derivatives of all trajectories  
  41                        # returned? 
  42  reduce_space = 0 # we may want to not return every timepoint but only 0,  
  43                   # reduce_space, 2*reduce_space etc. 
  44   
  45  failed_args = [] 
  46  failed_kwargs = {} 
  47   
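The module-level settings above (global_rtol, global_atol, global_hmax, return_derivs, reduce_space) are plain module attributes, so callers can adjust them before integrating. A minimal sketch of that pattern, assuming a Network and time list built elsewhere:

# Sketch only: tighten the module-wide integration settings before a run.
# Only the attribute names come from the listing above; 'net' and 'times'
# are assumed to be supplied by the caller.
from SloppyCell.ReactionNetworks import Dynamics

Dynamics.global_rtol = 1e-8    # default relative tolerance for all calls
Dynamics.global_atol = 1e-12   # cap on the rtol*typical_value absolute tolerance
Dynamics.global_hmax = 0.1     # maximum internal step size passed to daeint
# traj = Dynamics.integrate(net, times)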
  48  class IntegrationException(Utility.SloppyCellException):
  49      pass
  50  
  51  class FixedPointException(Utility.SloppyCellException):
  52      pass
  53  
  54  def integrate_tidbit(net, res_func, Dfun, root_func, IC, yp0, curTimes,
  55                       rtol, atol, fill_traj, return_derivs,
  56                       redirect_msgs, calculate_ic, var_types):
  57      N_dyn_var = len(net.dynamicVars)
  58  
  59      if net.integrateWithLogs:
  60          yp0 = yp0/IC
  61          IC = scipy.log(IC)
  62  
  63          res_func = net.res_function_logdv
  64          Dfun = None
  65          root_func = net.root_func_logdv
  66  
  67          atol = rtol
  68          rtol = [0]*len(rtol)
  69  
  70      if root_func is None:
  71          nrt = 0
  72      else:
  73          nrt = net.len_root_func
  74  
  75      int_args = {'res': res_func,
  76                  't': curTimes,
  77                  'y0': IC,
  78                  'yp0': yp0,
  79  
  80                  'jac': Dfun,
  81                  'nrt': nrt,
  82                  'rt': root_func,
  83  
  84                  'rpar': net.constantVarValues,
  85  
  86                  'rtol': rtol,
  87                  'atol': atol,
  88                  'max_steps': MAX_STEPS,
  89  
  90                  'intermediate_output': fill_traj,
  91                  'redir_output': redirect_msgs,
  92  
  93                  'calculate_ic' : calculate_ic,
  94                  'var_types' : var_types,
  95                  'hmax' : global_hmax,
  96                  }
  97  
  98      exception_raised = False
  99      try:
 100          int_returns = daeint(**int_args)
 101      except Utility.SloppyCellException, X:
 102          # When an exception happens, we'll try to return all the
 103          # trajectory we can, thus we don't reraise yet.
 104          exception_raised = X
 105          # We store the arguments to daeint that failed as
 106          # Dynamics.failed_kwargs for debugging purposes.
 107          global failed_kwargs
 108          failed_kwargs = int_args
 109          # Recover what we have of int_returns from the exception,
 110          # so we can process it.
 111          int_returns = X.args[1]
 112  
 113      # Pull things out of what daeint returned
 114      y_this, t_this = int_returns[0], int_returns[1]
 115      if return_derivs:
 116          ydt_this = int_returns[2]
 117      else:
 118          ydt_this = []
 119  
 120      t_root_this, y_root_this, i_root_this = int_returns[3:6]
 121  
 122      info_dict = int_returns[-1]
 123  
 124      # Deal with integrateWithLogs
 125      if net.integrateWithLogs:
 126          y_this = scipy.exp(y_this)
 127          ydt_this = ydt_this * y_this
 128          y_root_this = scipy.exp(y_root_this)
 129  
 130      return exception_raised, y_this, t_this, ydt_this,\
 131             t_root_this, y_root_this, i_root_this
 132  
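When net.integrateWithLogs is set, integrate_tidbit integrates u = log(y) instead of y, so the initial slope is divided by the value (du/dt = (dy/dt)/y) and the results are exponentiated on the way back out. A standalone numpy check of that change of variables (toy code, not part of SloppyCell):

# Toy check of the log-variable transform: integrating u = log(y) and
# exponentiating at the end recovers y, while keeping the variable positive.
import numpy as np

k, dt, nsteps = 0.7, 1e-4, 10000
y, u = 1.0, 0.0
for _ in range(nsteps):
    y = y + dt * (-k * y)                       # dy/dt = -k*y, integrated directly
    u = u + dt * (-k * np.exp(u)) / np.exp(u)   # du/dt = f(exp(u)) / exp(u)
print(np.allclose(y, np.exp(u), rtol=1e-4))      # True: both approximate exp(-k*t)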
 133  def generate_tolerances(net, rtol, atol=None):
 134      if rtol == None:
 135          rtol = global_rtol
 136      if scipy.isscalar(rtol):
 137          rtol = scipy.ones(len(net.dynamicVars)) * rtol
 138  
 139      # We set atol to be a minimum of global_atol to avoid variables with large
 140      # typical values going negative.
 141      if (scipy.isscalar(atol) or atol==None):
 142          typ_vals = [abs(net.get_var_typical_val(id))
 143                      for id in net.dynamicVars.keys()]
 144          atol = rtol * scipy.asarray(typ_vals)
 145      if global_atol:
 146          atol = scipy.minimum(atol, global_atol)
 147      return rtol, atol
 148  
 149  
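generate_tolerances broadcasts a scalar rtol over the dynamic variables and builds atol from rtol times each variable's typical value, capped by global_atol when that is set. A plain-numpy illustration with made-up typical values:

# Plain-numpy illustration of the atol rule above (made-up typical values).
import numpy as np

rtol = np.ones(3) * 1e-6                         # scalar rtol, broadcast per variable
typ_vals = np.abs(np.array([1.0, 100.0, 1e-3]))  # assumed typical values
atol = rtol * typ_vals                           # [1e-06, 1e-04, 1e-09]
global_atol = 1e-8
atol = np.minimum(atol, global_atol)             # capped: [1e-08, 1e-08, 1e-09]
print(atol)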
 150  def integrate(net, times, rtol=None, atol=None, params=None, fill_traj=True,
 151                return_events=False, return_derivs=False,
 152                redirect_msgs=True, calculate_ic = False,
 153                include_extra_event_info = False, use_constraints=True):
 154      """
 155      Integrate a Network, returning a Trajectory.
 156  
 157      net           The Network to integrate.
 158      times         A sequence of times to include in the integration output.
 159      params        Parameters for the integration. If params=None, the current
 160                    parameters of net are used.
 161      rtol          Relative error tolerance for this integration.
 162                    If rtol = None then relative tolerance is set by
 163                    Dynamics.global_rtol.
 164      atol          Absolute error tolerance for this integration.
 165      fill_traj     If True, the integrator will add the times it samples to
 166                    the returned trajectory. This is slower, but the resulting
 167                    trajectory will be more densely sampled where the dynamics
 168                    change quickly.
 169      return_events If True, the output is (traj, te, ye, ie). te is a sequence
 170                    of times at which events fired. ye is a sequence of the
 171                    dynamic variable values where the events fired. ie is the
 172                    index of the fired events.
 173      return_derivs If True, the returned trajectory records the time derivatives
 174                    of all quantities.
 175      redirect_msgs If False, the errors and other output generated by the
 176                    integrator will be returned to the display.
 177      calculate_ic  If True, the integrator will calculate consistent initial
 178                    conditions.
 179      include_extra_event_info   If True, the returned trajectory will have more
 180                    detailed event information, including pre- and post-execution
 181                    state, and assigned variable information.
 182      use_constraints   If True, and if the network has constraints, will raise
 183                    an exception if a constraint's math function becomes True
 184                    at any time.
 185      """
 186      logger.debug('Integrating network %s.' % net.get_id())
 187  
 188      if params is not None:
 189          net.update_optimizable_vars(params)
 190      constants = net.constantVarValues
 191      # On some systems, f2py'd functions fail if len(constants) == 0.
 192      if len(constants) == 0:
 193          constants = [0]
 194      # If you ask for time = 0, we'll assume you want dynamic variable values
 195      # reset.
 196      times = scipy.asarray(times)
 197      if times[0] == 0:
 198          net.resetDynamicVariables()
 199      net.compile()
 200  
 201      if (rtol == None or atol == None):
 202          rtol, atol = generate_tolerances(net, rtol, atol)
 203  
 204      # get the initial state, time, and derivative.
 205      start = times[0]
 206      IC = net.getDynamicVarValues()
 207      # We start with ypIC equal to typ_vals so that we get the correct scale.
 208      typ_vals = [net.get_var_typical_val(id) for id in net.dynamicVars.keys()]
 209      ypIC = scipy.array(typ_vals)
 210      # This calculates the yprime ICs and also ensures that values for our
 211      # algebraic variables are all consistent.
 212      IC, ypIC = find_ics(IC, ypIC, start,
 213                          net._dynamic_var_algebraic, rtol, atol,
 214                          net.constantVarValues, net,
 215                          redirect_msgs=redirect_msgs)
 216  
 217      """
 218  
 219      1) After the initial conditions have been calculated, use
 220         net.updateVariablesFromDynamicVars to make sure the Network object has
 221         the proper variable values.
 222      2) Then loop through the constraint events.
 223         a) For each constraint, result = net.evaluate_expr(constraint.trigger).
 224         b) If result is a violation (I don't know whether that means True
 225            or False), raise the exception.
 226      """
 227      # check that constraints are not violated at t=0
 228      if use_constraints == True:
 229          net.updateVariablesFromDynamicVars(values=IC, time=start)
 230          for con_id, constraint in net.constraints.items():
 231              result = net.evaluate_expr(constraint.trigger)
 232              if result == False:
 233                  raise Utility.ConstraintViolatedException(start,
 234                                                            constraint.trigger,
 235                                                            constraint.message)
 236  
 237  
 238  
 239      # start variables for output storage
 240      yout = scipy.zeros((0, len(IC)), scipy.float_)
 241      youtdt = scipy.zeros((0, len(IC)), scipy.float_)
 242      tout = []
 243  
 244      # For some reason, the F2Py wrapper for ddaskr seems to fail if this
 245      # isn't wrapped up in a lambda...
 246      res_func = net.res_function
 247      root_func = net.root_func
 248  
 249      # events_occurred will hold event_info objects that describe the events
 250      # in detail.
 251      # te, ye, and ie are the times, dynamic variable values, and indices
 252      # of fired events. Here mostly for historical reasons.
 253      events_occurred = []
 254      te, ye, ie = [], [], []
 255      pendingEvents = {}
 256  
 257      event_just_fired = False
 258      event_just_executed = False
 259  
 260      # This is the jacobian for use with ddaskr.
 261      try:
 262          _ddaskr_jac = net.ddaskr_jac
 263      except AttributeError:
 264          _ddaskr_jac = None
 265  
 266      exception_raised = None
 267      while start < times[-1]:
 268          # check to see if there are any pendingEvents with execution time
 269          # equal to the start time
 270          # since some chained events may be added to the pending events list,
 271          # keep looping until the key has been deleted
 272          if pendingEvents and min(pendingEvents.keys()) < start:
 273              raise ValueError('Missed an event!')
 274          event_buffer = 0
 275          while pendingEvents.has_key(start):
 276              execution_time = start
 277              # We need to backtrack to deal with this event...
 278              event_list = pendingEvents[execution_time]
 279  
 280              # Note: the correct behavior of chained events with events that
 281              # have delays is not clear
 282              # Note: When multiple events execute simultaneously, we check for
 283              # chained events after all the simultaneous executions occur.
 284  
 285              # store a copy of the value of root_func in order to check for
 286              # chains of events
 287              root_before = root_func(start, IC, ypIC, constants)
 288  
 289              # For those events whose execution time == start, execute
 290              # the event
 291              # We also record the event that started this chain of events
 292              # firing. This is necessary for sensitivity integration.
 293              chain_starter = event_list[-1]
 294              while len(event_list) > 0:
 295                  holder = event_list.pop()
 296  
 297                  # check whether the current event was a constraint
 298                  # technically this should be caught in fired_events, so the
 299                  # if statement below is deprecated and can be removed.
 300                  if holder.event_index >= len(net.events) and use_constraints == True:
 301                      # a constraint was violated
 302                      con_id = holder.event_id
 303                      constraint = net.constraints.get(con_id)
 304                      raise Utility.ConstraintViolatedException(start,
 305                                                                constraint.trigger,
 306                                                                constraint.message)
 307                  holder.time_exec = start
 308                  holder.y_pre_exec = copy.copy(IC)
 309                  holder.yp_pre_exec = copy.copy(ypIC)
 310                  IC = net.executeEvent(event=holder.event,
 311                                        time_fired=start,
 312                                        y_fired=holder.y_fired,
 313                                        time_current=start,
 314                                        y_current=IC)
 315                  # Now we update the initial conditions to reflect any changes
 316                  # made in event execution. This might include adjustments
 317                  # to algebraic variables and to yprime.
 318                  IC, ypIC = find_ics(IC, ypIC, start,
 319                                      net._dynamic_var_algebraic, rtol, atol,
 320                                      net.constantVarValues, net,
 321                                      redirect_msgs=redirect_msgs)
 322                  holder.y_post_exec = copy.copy(IC)
 323                  holder.yp_post_exec = copy.copy(ypIC)
 324                  event_buffer = max(event_buffer, holder.event.buffer)
 325  
 326              if event_buffer:
 327                  outputs = integrate_tidbit(net, res_func, _ddaskr_jac,
 328                                             root_func=None,
 329                                             IC=IC, yp0=ypIC,
 330                                             curTimes=[start, start+event_buffer],
 331                                             rtol=rtol, atol=atol,
 332                                             fill_traj=fill_traj,
 333                                             return_derivs=True,
 334                                             redirect_msgs=redirect_msgs,
 335                                             calculate_ic = False,
 336                                             var_types=net._dynamic_var_algebraic)
 337  
 338                  exception_raised, yout_this, tout_this, youtdt_this,\
 339                          t_root_this, y_root_this, i_root_this = outputs
 340  
 341                  yout = scipy.concatenate((yout, yout_this))
 342                  youtdt = scipy.concatenate((youtdt, youtdt_this))
 343                  tout.extend(tout_this)
 344  
 345                  if exception_raised:
 346                      break
 347  
 348                  start = tout[-1]
 349                  IC = copy.copy(yout[-1])
 350                  ypIC = copy.copy(youtdt_this[-1])
 351  
 352              # Update the root state after all listed events have executed.
 353              root_after = root_func(start, IC, ypIC, constants)
 354  
 355              # Check for chained events/constraints
 356              crossing_dirs = root_after - root_before
 357              event_just_fired = fired_events(net, start, IC, ypIC,
 358                                              crossing_dirs,
 359                                              events_occurred, pendingEvents,
 360                                              chained_off_of = chain_starter,
 361                                              use_constraints=use_constraints)
 362              event_just_executed = True
 363  
 364              # If there are no more events to execute at this time, then
 365              # delete the entry for this time in pendingEvents.
 366              # Otherwise we go back to the top of this loop, to execute the
 367              # next event, check for firing events, etc.
 368              if len(pendingEvents[execution_time]) == 0:
 369                  del pendingEvents[execution_time]
 370  
 371          # We remove 'double timepoints' that we would otherwise have when
 372          # an event fired, but didn't execute due to a delay.
 373          if not event_just_executed and len(tout) > 0:
 374              tout = tout[:-1]
 375              yout = yout[:-1]
 376              youtdt = youtdt[:-1]
 377          event_just_executed = False
 378  
 379          # If there are still pending events, set the nextEventTime
 380          nextEventTime = scipy.inf
 381          if pendingEvents:
 382              nextEventTime = min(pendingEvents.keys())
 383  
 384          # If we have pending events, only integrate until the next one.
 385          if nextEventTime < times[-1]:
 386              curTimes = scipy.compress((times > start) & (times < nextEventTime),
 387                                        times)
 388              curTimes = scipy.concatenate(([start], curTimes, [nextEventTime]))
 389          else:
 390              curTimes = scipy.compress(times > start, times)
 391              curTimes = scipy.concatenate(([start], curTimes))
 392  
 393          outputs = integrate_tidbit(net, res_func, _ddaskr_jac,
 394                                     root_func=root_func,
 395                                     IC=IC, yp0=ypIC, curTimes=curTimes,
 396                                     rtol=rtol, atol=atol,
 397                                     fill_traj=fill_traj,
 398                                     return_derivs=True,
 399                                     redirect_msgs=redirect_msgs,
 400                                     calculate_ic = False,
 401                                     var_types = net._dynamic_var_algebraic)
 402  
 403          exception_raised, yout_this, tout_this, youtdt_this,\
 404                  t_root_this, y_root_this, i_root_this = outputs
 405  
 406          yout = scipy.concatenate((yout, yout_this))
 407          youtdt = scipy.concatenate((youtdt, youtdt_this))
 408          tout.extend(tout_this)
 409  
 410          if exception_raised:
 411              break
 412  
 413          start = tout[-1]
 414          IC = copy.copy(yout[-1])
 415          ypIC = copy.copy(youtdt_this[-1])
 416  
 417          # Check for events firing.
 418          # If one of the fired events is a constraint, then we'll catch
 419          # it at the top of the while loop for integration
 420          event_just_fired = fired_events(net, start, IC, ypIC,
 421                                          i_root_this,
 422                                          events_occurred, pendingEvents,
 423                                          use_constraints=use_constraints)
 424  
 425      # End of while loop for integration.
 426      if len(yout) and len(tout):
 427          net.updateVariablesFromDynamicVars(yout[-1], tout[-1])
 428  
 429      if not fill_traj and not exception_raised:
 430          yout = _reduce_times(yout, tout, times)
 431          if return_derivs :
 432              youtdt = _reduce_times(youtdt, tout, times)
 433          tout = times
 434      elif reduce_space :
 435          # make sure we don't miss data times by adding them in.
 436          # The set usage ensures that we don't end up with duplicates
 437          filtered_times = [tout[i] for i in range(0,len(tout),reduce_space)]
 438          filtered_times = sets.Set(filtered_times)
 439          filtered_times.union_update(times)
 440          filtered_times = scipy.sort(list(filtered_times))
 441  
 442          yout = _reduce_times(yout, tout, filtered_times)
 443          if return_derivs :
 444              youtdt = _reduce_times(youtdt, tout, filtered_times)
 445          tout = filtered_times
 446  
 447      trajectory = Trajectory_mod.Trajectory(net, holds_dt=return_derivs,
 448                                             const_vals=net.constantVarValues)
 449      if return_derivs :
 450          yout = scipy.concatenate((yout,youtdt),axis=1) # join columnwise
 451  
 452      trajectory.appendFromODEINT(tout, yout, holds_dt=return_derivs)
 453      # Fill in the historical te, ye, ie lists
 454      te, ye, ie, ye_post = [],[],[],[]
 455      for holder in events_occurred:
 456          te.append(holder.time_exec)
 457          ye.append(holder.y_pre_exec)
 458          ye_post.append(holder.y_post_exec)
 459          ie.append(holder.event_index)
 460  
 461      # We only include y_post_exec and assigned vars if requested
 462      if include_extra_event_info == False:
 463          trajectory.add_event_info(net, (te,ye,ie), tout[-1],
 464                                    include_extra_event_info = include_extra_event_info)
 465      elif include_extra_event_info == True:
 466          trajectory.add_event_info(net, (te,ye,ye_post,ie), tout[-1],
 467                                    include_extra_event_info = include_extra_event_info)
 468  
 469  
 470      trajectory.events_occurred = events_occurred
 471      net.trajectory = trajectory
 472  
 473      # raise exception if exited integration loop prematurely
 474      if exception_raised:
 475          logger.warn('Integration ended prematurely in network %s on node %i.'
 476                      % (net.id, my_rank))
 477          raise exception_raised
 478  
 479      return trajectory
 480  
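A hypothetical driver showing the intended call pattern for integrate(); the Network construction is omitted, so net below is a stand-in for a previously built ReactionNetworks Network:

# Sketch only: integrate a network over a fixed time grid and pull out one
# variable's timecourse. 'net' and the variable id 'X' are assumptions here.
import scipy
from SloppyCell.ReactionNetworks import Dynamics

def run_timecourse(net, t_end=100.0, n_pts=201):
    times = scipy.linspace(0, t_end, n_pts)
    traj = Dynamics.integrate(net, times, rtol=1e-8, fill_traj=True)
    return traj.get_times(), traj.get_var_traj('X')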
 481  def fired_events(net, time, y, yp, crossing_dirs,
 482                   events_occurred, pendingEvents,
 483                   chained_off_of=None, use_constraints=False):
 484      # checking for both normal events and constraint events
 485      num_events = len(net.events)+len(net.constraints)
 486      event_just_fired = False
 487      # Check the directions of our root crossings to see whether events
 488      # actually fired. DASKR automatically returns the direction of event
 489      # crossings, so we can read this information from the integration
 490      # results.
 491      # Note that we might have more than one event firing at the same time.
 492  
 493      for event_index, dire_cross in zip(range(num_events), crossing_dirs):
 494          # dire_cross is the direction of the event crossing. If it's
 495          # 1 that means Ri changed from negative to positive. In that case
 496          # we need to record the event. Note that num_events runs over the
 497          # real events, not the sub-clauses which are also in root_func.
 498          if dire_cross > 0 and event_index < len(net.events):
 499              event_just_fired = True
 500              if event_index < len(net.events):
 501                  event = net.events[event_index]
 502  
 503              logger.debug('Event %s fired at time=%g in network %s.'
 504                           % (event.id, time, net.id))
 505  
 506              # Which clauses of the event fired?
 507              # These are the crossing_dirs's corresponding to the subclauses
 508              sub_clause_dirs = crossing_dirs[num_events:]
 509              # We need to add back in num_events for counting purposes
 510              sub_clauses_fired = scipy.nonzero(sub_clause_dirs > 0)[0]\
 511                                  + num_events
 512              # Now we check which, if any, of these sub_clauses actually
 513              # belong to this event. This is only necessary because events
 514              # may fire at the same time.
 515              sub_clauses = []
 516              for fired_index in sub_clauses_fired:
 517                  if fired_index in event.sub_clause_indices:
 518                      sub_clauses.append(fired_index)
 519  
 520              # If no sub-clauses fired, then it was just a simple event.
 521              if len(sub_clauses) == 0:
 522                  clause_index = event_index
 523              # But if one of the subclauses fired, we want to record that.
 524              elif len(sub_clauses) == 1:
 525                  clause_index = sub_clauses.pop()
 526              else:
 527                  # We might have a problem.
 528                  raise ValueError('More than one clause in event trigger %s '
 529                                   'changed simultaneously! Sensitivity '
 530                                   'integration will fail.' % event.trigger)
 531              delay = net.fireEvent(event, y, time)
 532              execution_time = time + delay
 533  
 534              # We use this object to record which events have fired
 535              holder = event_info()
 536              holder.event = event
 537              holder.event_index = event_index
 538              holder.event_id = event.id
 539              holder.clause_index = clause_index
 540              holder.time_fired = time
 541              holder.y_fired = copy.copy(y)
 542              holder.yp_fired = copy.copy(yp)
 543              holder.delay = delay
 544              holder.chained_off_of = chained_off_of
 545              events_occurred.append(holder)
 546  
 547              # Add this event to the list of events that are supposed to
 548              # execute.
 549              if not pendingEvents.has_key(execution_time):
 550                  pendingEvents[execution_time] = []
 551              pendingEvents[execution_time].append(holder)
 552  
 553          if dire_cross < 0 and event_index >= len(net.events) \
 554             and event_index <= len(net.events) + len(net.constraints):
 555              event_just_fired = True
 556              event = net.constraints[event_index-len(net.events)]
 557              con_id = event.id
 558              constraint = net.constraints.get(con_id)
 559              if use_constraints == True:
 560                  raise Utility.ConstraintViolatedException(time,
 561                                                            constraint.trigger,
 562                                                            constraint.message)
 563  
 564  
 565      return event_just_fired
 566  
 567  def integrate_sens_subset(net, times, rtol=None,
 568                            fill_traj=False, opt_vars=None,
 569                            return_derivs=False, redirect_msgs=True):
 570      """
 571      Integrate the sensitivity equations for a list of optimizable variables.
 572      """
 573      rtol, atol = generate_tolerances(net, rtol)
 574      # We integrate the net once just to know where all our events are
 575      traj = integrate(net, times, rtol=rtol, fill_traj=fill_traj,
 576                       return_events=False, return_derivs=return_derivs,
 577                       redirect_msgs=redirect_msgs)
 578  
 579      N_dyn_vars = len(net.dynamicVars)
 580      # Create the array that will hold all our sensitivities
 581      out_size = (len(traj.get_times()), N_dyn_vars + N_dyn_vars*len(opt_vars))
 582      all_yout = scipy.zeros(out_size, scipy.float_)
 583  
 584      # Copy the values from the trajectory into this array
 585      for ii, dyn_var in enumerate(net.dynamicVars.keys()):
 586          all_yout[:, ii] = traj.get_var_traj(dyn_var)
 587  
 588      # Similarly for the derivative outputs
 589      if return_derivs:
 590          all_youtdt = scipy.zeros(out_size, scipy.float_)
 591          for ii, dyn_var in enumerate(net.dynamicVars.keys()):
 592              all_youtdt[:, ii] = traj.get_var_traj((dyn_var, 'time'))
 593  
 594      # Now we integrate one optimizable variable at a time
 595      for ii, opt_var in enumerate(opt_vars):
 596          single_out = integrate_sens_single(net, traj, rtol, opt_var,
 597                                             return_derivs, redirect_msgs)
 598          # Copy the result into our main arrays.
 599          start_col = (ii+1) * N_dyn_vars
 600          end_col = (ii+2) * N_dyn_vars
 601          if not return_derivs:
 602              all_yout[:, start_col:end_col] = single_out
 603          else:
 604              all_yout[:, start_col:end_col] = single_out[0]
 605              all_youtdt[:, start_col:end_col] = single_out[1]
 606  
 607      if not return_derivs:
 608          return traj.get_times(), all_yout
 609      else:
 610          return traj.get_times(), all_yout, all_youtdt
 611  
 612  def integrate_sens_single(net, traj, rtol, opt_var, return_derivs,
 613                            redirect_msgs):
 614      """
 615      Integrate the sensitivity equations for a single optimization variable.
 616      """
 617      logger.debug('Integrating sensitivity for %s.' % opt_var)
 618      opt_var_index = net.optimizableVars.index_by_key(opt_var)
 619      N_dyn_vars = len(net.dynamicVars)
 620  
 621      # Calculate tolerances for our sensitivity integration
 622      rtol, atol = generate_tolerances(net, rtol)
 623      if net.integrateWithLogs:
 624          atol = rtol
 625          rtol = [0] * len(net.dynamicVars)
 626      # We set the same rtol for sensitivity variables as for the normal
 627      # variables.
 628      rtol_for_sens = scipy.concatenate((rtol, rtol))
 629      # Absolute tolerances depend on the typical value of the optimizable
 630      # variable
 631      atol_for_sens = atol/abs(net.get_var_typical_val(opt_var))
 632      atol_for_sens = scipy.concatenate((atol, atol_for_sens))
 633  
 634      # The passed-in normal trajectory tells us what times we need to return,
 635      # and what events will happen.
 636      times = traj.get_times()
 637      times = scipy.asarray(times)
 638      events_occurred = copy.copy(traj.events_occurred)
 639  
 640      current_time = times[0]
 641  
 642      # Initial conditions
 643      IC = scipy.zeros(2*N_dyn_vars, scipy.float_)
 644      for dvInd, (id, var) in enumerate(net.dynamicVars.items()):
 645          # For normal integration
 646          IC[dvInd] = traj.get_var_val(id, current_time)
 647          # For sensitivity integration
 648          if isinstance(var.initialValue, str):
 649              DwrtOV = net.takeDerivative(var.initialValue, opt_var)
 650              IC[N_dyn_vars + dvInd] = net.evaluate_expr(DwrtOV,
 651                                                         time=current_time)
 652  
 653      # The index of the variable to calculate sensitivity wrt is passed in
 654      # the last element of rpar. It is obtained by an (int) cast, so we add
 655      # a small amount to make sure the cast doesn't round down stupidly.
 656      rpar = scipy.concatenate((net.constantVarValues, [opt_var_index+0.1]))
 657  
 658      # Our initial guess for ypIC will be based on the typical values
 659      typ_vals = [net.get_var_typical_val(id) for id in net.dynamicVars.keys()]*2
 660      ypIC = scipy.array(typ_vals)
 661      ypIC[N_dyn_vars:] /= net.get_var_ic(opt_var)
 662  
 663      tout = []
 664      sens_out = scipy.zeros((0, N_dyn_vars), scipy.float_)
 665      sensdt_out = scipy.zeros((0, N_dyn_vars), scipy.float_)
 666  
 667      fired_events = {}
 668      executed_events = {}
 669      for holder in events_occurred:
 670          firing_time = holder.time_fired
 671          fired_events.setdefault(firing_time, [])
 672          fired_events[firing_time].append(holder)
 673  
 674          execution_time = holder.time_exec
 675          executed_events.setdefault(execution_time, [])
 676          executed_events[execution_time].append(holder)
 677  
 678      event_just_executed = False
 679      while current_time < times[-1]:
 680          logger.debug('sens_int current_time: %f' % current_time)
 681          if executed_events and min(executed_events.keys()) < current_time:
 682              raise ValueError('Missed an event execution in sensitivity '
 683                               'integration!')
 684          if executed_events.has_key(current_time):
 685              event_list = executed_events[current_time]
 686              for holder in event_list:
 687                  logger.debug('Executing event at time %f.' % current_time)
 688                  IC = net.executeEventAndUpdateSens(holder, IC, opt_var)
 689              del executed_events[current_time]
 690              event_just_executed = True
 691  
 692          # We remove 'double timepoints' that we would otherwise have when
 693          # an event fired, but didn't execute due to a delay.
 694          if not event_just_executed and len(tout) > 0:
 695              tout = tout[:-1]
 696              sens_out = sens_out[:-1]
 697              sensdt_out = sensdt_out[:-1]
 698          event_just_executed = False
 699  
 700          integrate_until = times[-1]
 701          if fired_events:
 702              integrate_until = min(integrate_until, min(fired_events.keys()))
 703          if executed_events:
 704              integrate_until = min(integrate_until, min(executed_events.keys()))
 705  
 706          # The list of times in this part of the integration
 707          if integrate_until < times[-1]:
 708              int_times = scipy.compress((times > current_time)
 709                                         & (times < integrate_until),
 710                                         times)
 711              int_times = scipy.concatenate(([current_time], int_times,
 712                                             [integrate_until]))
 713          else:
 714              int_times = scipy.compress(times > current_time, times)
 715              int_times = scipy.concatenate(([current_time], int_times))
 716  
 717          ypIC = find_ypic_sens(IC, ypIC, current_time,
 718                                net._dynamic_var_algebraic,
 719                                rtol, atol_for_sens, rpar, net, opt_var)
 720  
 721          sens_rhs = net.sens_rhs
 722          if net.integrateWithLogs:
 723              ypIC[:N_dyn_vars] *= IC[:N_dyn_vars]
 724              IC[:N_dyn_vars] = scipy.log(IC[:N_dyn_vars])
 725              sens_rhs = net.sens_rhs_logdv
 726  
 727          # Note that IC is not necessarily correct for the sensitivities of
 728          # algebraic variables. We'll let ddaskr calculate them using
 729          # calculate_ic = True.
 730          # They will be correct in the returned trajectory. For now, we don't
 731          # use d/dt of sensitivities, so it doesn't matter if those are wrong
 732          # at single points in the trajectory.
 733          var_types = scipy.concatenate((net._dynamic_var_algebraic,
 734                                         net._dynamic_var_algebraic))
 735          try:
 736              int_outputs = daeint(sens_rhs, int_times,
 737                                   IC, ypIC,
 738                                   rtol_for_sens, atol_for_sens,
 739                                   rpar = rpar,
 740                                   max_steps = MAX_STEPS,
 741                                   var_types = var_types,
 742                                   calculate_ic = True,
 743                                   redir_output = redirect_msgs,
 744                                   hmax = global_hmax)
 745          except Utility.SloppyCellException, X:
 746              logger.warn('Sensitivity integration failed for network %s on '
 747                          'node %i during optimizable variable %s.'
 748                          % (net.id, my_rank, opt_var))
 749              raise
 750  
 751          # Copy out the sensitivity values
 752          yout_this = int_outputs[0]
 753          youtdt_this = int_outputs[2]
 754          sens_out = scipy.concatenate((sens_out,
 755                                        yout_this[:,N_dyn_vars:].copy()))
 756          sensdt_out = scipy.concatenate((sensdt_out,
 757                                          youtdt_this[:,N_dyn_vars:].copy()))
 758          tout.extend(int_times)
 759  
 760          current_time, IC = tout[-1], yout_this[-1].copy()
 761          ypIC = youtdt_this[-1]
 762          if net.integrateWithLogs:
 763              IC[:N_dyn_vars] = scipy.exp(IC[:N_dyn_vars])
 764              ypIC[:N_dyn_vars] *= IC[:N_dyn_vars]
 765  
 766          if fired_events and min(fired_events.keys()) < current_time:
 767              raise ValueError('Missed event firing in sensitivity integration!')
 768          if fired_events.has_key(current_time):
 769              logger.debug('Firing event at time %f.' % current_time)
 770              event_list = fired_events[current_time]
 771              while event_list:
 772                  holder = event_list.pop()
 773                  holder.ysens_fired = copy.copy(IC)
 774              del fired_events[current_time]
 775  
 776      sens_out = _reduce_times(sens_out, tout, times)
 777      if not return_derivs:
 778          return sens_out
 779      else:
 780          sensdt_out = _reduce_times(sensdt_out, tout, times)
 781          return sens_out, sensdt_out
 782  
 783  def _parse_sens_result(result, net, opt_vars, yout, youtdt=None):
 784      """
 785      Utility function for parsing the return from integrate_sens_subset
 786      """
 787      n_dyn, n_opt = len(net.dynamicVars), len(net.optimizableVars)
 788      for work_index, opt_var in enumerate(opt_vars):
 789          net_index = net.optimizableVars.indexByKey(opt_var)
 790          net_start = (net_index+1)*n_dyn
 791          net_end = (net_index+2)*n_dyn
 792  
 793          work_start = (work_index+1)*n_dyn
 794          work_end = (work_index+2)*n_dyn
 795  
 796          yout[:, net_start:net_end] = result[1][:, work_start:work_end]
 797          if youtdt is not None:
 798              youtdt[:, net_start: net_end] =\
 799                      result[2][:, work_start:work_end]
 800  
 801  def integrate_sensitivity(net, times, params=None, rtol=None,
 802                            fill_traj=False, return_derivs=False,
 803                            redirect_msgs=True):
 804      logger.debug('Entering integrate_sens on node %i' % my_rank)
 805  
 806      times = scipy.array(times)
 807      net.compile()
 808      if times[0] == 0:
 809          net.resetDynamicVariables()
 810  
 811      if params is not None:
 812          net.update_optimizable_vars(params)
 813  
 814      # Assigned variables to process on each node.
 815      vars_assigned = dict([(node, net.optimizableVars.keys()[node::num_procs])
 816                            for node in range(num_procs)])
 817  
 818      # Send out jobs for workers
 819      for worker in range(1, num_procs):
 820          logger.debug('Sending to worker %i: %s' % (worker,
 821                                                      str(vars_assigned[worker])))
 822          command = 'Dynamics.integrate_sens_subset(net, times,'\
 823                  'rtol, fill_traj, opt_vars, return_derivs, redirect_msgs=redir)'
 824          args = {'net':net, 'times':times, 'rtol':rtol, 'fill_traj':fill_traj,
 825                  'opt_vars':vars_assigned[worker], 'return_derivs':return_derivs,
 826                  'redir': redirect_msgs}
 827          pypar.send((command, args), worker)
 828  
 829      logger.debug('Master doing vars %s' % str(vars_assigned[0]))
 830      try:
 831          result = integrate_sens_subset(net, times, rtol, fill_traj,
 832                                         vars_assigned[0], return_derivs,
 833                                         redirect_msgs=redirect_msgs)
 834      except Utility.SloppyCellException:
 835          # If the master encounters an exception, we must still wait and get
 836          # replies from all the workers (even if we do nothing with them) so
 837          # that communication stays synchronized.
 838          for worker in range(1, num_procs):
 839              pypar.receive(worker)
 840          raise
 841  
 842      # Begin pulling results together...
 843      tout = result[0]
 844      # Build the yout array
 845      n_dyn, n_opt = len(net.dynamicVars), len(net.optimizableVars)
 846      yout = scipy.zeros((len(tout), n_dyn * (n_opt+ 1)), scipy.float_)
 847  
 848      # We use the master's result for the non-sensitivity values
 849      yout[:, :n_dyn] = result[1][:, :n_dyn]
 850      if return_derivs:
 851          youtdt = scipy.zeros((len(tout), n_dyn * (n_opt+ 1)), scipy.float_)
 852          youtdt[:, :n_dyn] = result[2][:, :n_dyn]
 853      else:
 854          youtdt = None
 855  
 856      # Copy the sensitivity results into yout and (if necessary) youtdt
 857      _parse_sens_result(result, net, vars_assigned[0], yout, youtdt)
 858  
 859      # Now when we listen to the workers' replies, we store any exception they
 860      # return in exception_raised. We'll reraise that exception after getting
 861      # replies from all the workers.
 862      exception_raised = None
 863      for worker in range(1, num_procs):
 864          logger.debug('Receiving result from worker %i.' % worker)
 865          result = pypar.receive(worker)
 866          if isinstance(result, Utility.SloppyCellException):
 867              exception_raised = result
 868              continue
 869          _parse_sens_result(result, net, vars_assigned[worker], yout, youtdt)
 870  
 871      if exception_raised:
 872          raise exception_raised
 873  
 874      ddv_dpTrajectory = Trajectory_mod.Trajectory(net, is_sens=True,
 875                                                   holds_dt=return_derivs)
 876      if return_derivs:
 877          yout = scipy.concatenate((yout, youtdt), axis=1)
 878      ddv_dpTrajectory.appendSensFromODEINT(tout, yout, holds_dt = return_derivs)
 879  
 880      net.trajectory = ddv_dpTrajectory
 881  
 882      return ddv_dpTrajectory
 883  
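integrate_sensitivity farms integrate_sens_subset out over the available pypar workers and assembles a single sensitivity Trajectory. A hedged usage sketch; the (dynamic variable, optimizable variable) tuple key is an assumed convention, by analogy with the (dyn_var, 'time') keys used above:

# Sketch only: run a sensitivity integration and read d(dyn_var)/d(opt_var).
# 'net', 'X' and 'k1' are stand-ins; the tuple key convention is assumed.
import scipy
from SloppyCell.ReactionNetworks import Dynamics

def run_sensitivity(net, dyn_var='X', opt_var='k1', t_end=10.0):
    times = scipy.linspace(0, t_end, 51)
    sens_traj = Dynamics.integrate_sensitivity(net, times, rtol=1e-6)
    return sens_traj.get_var_traj((dyn_var, opt_var))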
 884  def dyn_var_fixed_point(net, dv0=None, with_logs=True, xtol=1e-6, time=0,
 885                          stability=False, fsolve_factor=100,
 886                          maxfev=10000):
 887      """
 888      Return the dynamic variable values at the closest fixed point of the net.
 889  
 890      dv0        Initial guess for the fixed point. If not given, the current
 891                 state of the net is used.
 892      with_logs  If True, the calculation is done in terms of logs of variables,
 893                 so that they cannot be negative.
 894      xtol       Tolerance to aim for.
 895      time       Time to plug into equations.
 896      stability  If True, return the stability for the fixed point. -1 indicates
 897                 stable node, +1 indicates unstable node, 0 indicates saddle
 898      fsolve_factor  'factor' argument for fsolve. For more information, see
 899                 help(scipy.optimize.fsolve). Should be in range 0.1 to 100.
 900      maxfev     'maxfev' argument for fsolve. For more information, see
 901                 help(scipy.optimize.fsolve). Should be an integer > 1.
 902      """
 903      net.compile()
 904  
 905      if dv0 is None:
 906          dv0 = scipy.array(net.getDynamicVarValues())
 907      else:
 908          dv0 = scipy.asarray(dv0)
 909  
 910      consts = net.constantVarValues
 911  
 912      zeros = scipy.zeros(len(dv0), scipy.float_)
 913      if with_logs:
 914          # We take the absolute value of dv0 to avoid problems from small
 915          # numerical noise negative values.
 916          if scipy.any(dv0 <= 0):
 917              logger.warning('Non-positive values in initial guess for fixed '
 918                             'point and with_logs = True. Rounding them up to '
 919                             'double_tiny. The most negative value was %g.'
 920                             % min(dv0))
 921              dv0 = scipy.maximum(dv0, _double_tiny_)
 922  
 923          # XXX: Would like to replace these with C'd versions, if it's holding
 924          # any of our users up.
 925          def func(logy):
 926              return net.res_function_logdv(time, logy, zeros, consts)
 927          def fprime(logy):
 928              y = scipy.exp(logy)
 929              y = scipy.maximum(y, _double_tiny_)
 930              return net.dres_dc_function(time, y, zeros, consts)
 931  
 932          x0 = scipy.log(dv0)
 933          # To transform sigma_x to sigma_log_x, we divide by x. We can set
 934          # our sigma_log_x to be the mean of what our xtol would yield
 935          # for each chemical.
 936          xtol = scipy.mean(xtol/dv0)
 937      else:
 938          def func(y):
 939              return net.res_function(time, y, zeros, consts)
 940          def fprime(y):
 941              return net.dres_dc_function(time, y, zeros, consts)
 942          x0 = dv0
 943  
 944      try:
 945          dvFixed, infodict, ier, mesg =\
 946                  scipy.optimize.fsolve(func, x0=x0.copy(), full_output=True,
 947                                        fprime=fprime,
 948                                        xtol=xtol, maxfev=maxfev,
 949                                        factor=fsolve_factor)
 950      except (scipy.optimize.minpack.error, ArithmeticError), X:
 951          raise FixedPointException(('Failure in fsolve.', X))
 952  
 953      tiny = _double_epsilon_
 954  
 955      if with_logs:
 956          dvFixed = scipy.exp(dvFixed)
 957  
 958      if ier != 1:
 959          if scipy.all(abs(dvFixed) < tiny) and not scipy.all(abs(x0) < 1e6*tiny):
 960              # This is the case where the answer is zero, and our initial guess
 961              # was reasonably large. In this case, the solver fails because
 962              # it's looking at a relative tolerance, but it's not really a
 963              # failure.
 964              pass
 965          else:
 966              raise FixedPointException(mesg, infodict)
 967  
 968      if not stability:
 969          return dvFixed
 970      else:
 971          jac = net.dres_dc_function(time, dvFixed, zeros, consts)
 972          u = scipy.linalg.eigvals(jac)
 973          u = scipy.real_if_close(u)
 974          if scipy.all(u < 0):
 975              stable = -1
 976          elif scipy.all(u > 0):
 977              stable = 1
 978          else:
 979              stable = 0
 980          return (dvFixed, stable)
 981  
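A short sketch of calling the fixed-point finder with stability information; net is again assumed to be an existing Network:

# Sketch only: find the nearest fixed point in log space and classify it.
from SloppyCell.ReactionNetworks import Dynamics

def settle(net):
    fp, stable = Dynamics.dyn_var_fixed_point(net, with_logs=True,
                                              stability=True)
    # stable: -1 stable node, +1 unstable node, 0 saddle (see docstring above)
    return fp, stable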
 982  def find_ypic_sens(y, yp, time, var_types, rtol, atol_for_sens, constants,
 983                     net, opt_var, redirect_msgs=False):
 984      # On some systems, the f2py'd functions don't like len(constants)=0.
 985      if len(constants) == 0:
 986          constants = [0]
 987      var_types = scipy.asarray(var_types)
 988      y = scipy.asarray(y, scipy.float_)
 989      yp = scipy.asarray(yp, scipy.float_)
 990      atol_for_sens = scipy.asarray(atol_for_sens)
 991  
 992      N_dyn = len(var_types)
 993      y = copy.copy(y)
 994      yp = copy.copy(yp)
 995  
 996      # Find the initial conditions for the normal variables
 997      y_norm, yp_norm = find_ics(y[:N_dyn], yp[:N_dyn], time,
 998                                 var_types, rtol, atol_for_sens[:N_dyn],
 999                                 net.constantVarValues, net,
1000                                 redirect_msgs=redirect_msgs)
1001      # Copy the updated values into our y and yp arrays
1002      y[:N_dyn] = y_norm
1003      yp[:N_dyn] = yp_norm
1004  
1005      # Now we can solve for yp for all the *non-algebraic* sensitivity variables
1006      # Notice that they are simply the appropriate residual evaluated when
1007      # yp for the sens variable is zero.
1008      yp[N_dyn:][var_types == 1] = 0
1009      res = net.sens_rhs(time, y, yp, constants)
1010      yp[N_dyn:][var_types == 1] = res[N_dyn:][var_types == 1]
1011  
1012      return yp
1013  
1014  def find_ics(y, yp, time, var_types, rtol, atol, constants, net,
1015               redirect_msgs=False):
1016      # We use this to find consistent sets of initial conditions for our
1017      # integrations. (We don't let ddaskr do it, because it doesn't calculate
1018      # values for d(alg_var)/dt, and we need them for sensitivity integration.)
1019  
1020      # On some systems, the f2py'd functions don't like len(constants)=0.
1021      if len(constants) == 0:
1022          constants = [0]
1023      var_types = scipy.asarray(var_types)
1024      atol = scipy.asarray(atol)
1025      rtol = scipy.asarray(rtol)
1026      # Note that we're copying y and yprime
1027      y = scipy.array(y, scipy.float_)
1028      yp = scipy.array(yp, scipy.float_)
1029  
1030      N_alg = scipy.sum(var_types == -1)
1031  
1032      dv_typ_vals = scipy.asarray([net.get_var_typical_val(id)
1033                                   for id in net.dynamicVars.keys()])
1034  
1035      if N_alg:
1036          # First we calculate a consistent set of algebraic variable values
1037          alg_vars_guess = y[var_types == -1]
1038          alg_typ_vals = dv_typ_vals[var_types == -1]
1039          possible_guesses = [alg_vars_guess, alg_typ_vals,
1040                              scipy.ones(N_alg, scipy.float_)]
1041          redir = Utility.Redirector()
1042          if redirect_msgs:
1043              redir.start()
1044          try:
1045              for guess in possible_guesses:
1046                  sln, infodict, ier, mesg = \
1047                          scipy.optimize.fsolve(net.alg_res_func, x0 = guess,
1048                                                xtol = min(rtol),
1049                                                args = (y, time, constants),
1050                                                full_output=True)
1051                  sln = scipy.atleast_1d(sln)
1052                  final_residuals = net.alg_res_func(sln, y, time, constants)
1053                  if not scipy.any(abs(final_residuals)
1054                                   > abs(atol[var_types == -1])):
1055                      # This is success.
1056                      break
1057              else:
1058                  message = ('Failed to calculate consistent algebraic values in '
1059                             'network %s.' % net.get_id())
1060                  raise Utility.SloppyCellException(message)
1061          finally:
1062              messages = redir.stop()
1063  
1064          # Now plug those values into the current y
1065          y[var_types == -1] = sln
1066  
1067      # The non-algebraic variable yprimes come straight from the residuals
1068      yp_non_alg = net.res_function(time, y, y*0, constants)[var_types == 1]
1069      yp[var_types == 1] = yp_non_alg
1070  
1071      if not N_alg:
1072          return y, yp
1073  
1074      # Now we need to figure out yprime for the algebraic vars
1075      curr_alg_yp = yp[var_types == -1]
1076      ones_arr = scipy.ones(N_alg, scipy.float_)
1077      # We try a range of possible guesses. Note that this is really just
1078      # a linear system, so if we continue to have difficulties with this part
1079      # of the calculation, or if it becomes a slow-down, we should consider
1080      # doing it by a linear solve, rather than using fsolve.
1081      possible_guesses = [curr_alg_yp, alg_typ_vals,
1082                          ones_arr,
1083                          scipy.mean(abs(yp)) * ones_arr,
1084                          -scipy.mean(abs(yp)) * ones_arr,
1085                          max(abs(yp)) * ones_arr,
1086                          -max(abs(yp)) * ones_arr]
1087      if redirect_msgs:
1088          redir.start()
1089      try:
1090          for guess in possible_guesses:
1091              sln, infodict, ier, mesg = \
1092                      scipy.optimize.fsolve(net.alg_deriv_func, x0 = guess,
1093                                            xtol = min(rtol),
1094                                            args = (y, yp, time, constants),
1095                                            full_output=True)
1096              sln = scipy.atleast_1d(sln)
1097              final_residuals = net.alg_deriv_func(sln, y, yp, time, constants)
1098              if not scipy.any(abs(final_residuals) > abs(atol[var_types == -1])):
1099                  break
1100          else:
1101              raise Utility.SloppyCellException('Failed to calculate alg var '
1102                                                'derivatives in network %s.'
1103                                                % net.get_id())
1104      finally:
1105          messages = redir.stop()
1106      sln = scipy.atleast_1d(sln)
1107      yp[var_types == -1] = sln
1108  
1109      return y, yp
1110  
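find_ics makes the initial state consistent by solving the algebraic residuals with fsolve and then reading the differential yprime values off the full residual. A toy scipy version of that two-step idea for a single algebraic constraint (not SloppyCell code):

# Toy consistency step: DAE with dy0/dt = -y0 and constraint 0 = y0 + y1 - 1.
# Solve the algebraic residual for y1, then evaluate dy0/dt at the result.
import scipy.optimize

def alg_res(y1, y0):
    return y0 + y1 - 1.0           # algebraic residual, driven to zero

y0 = 0.25
y1 = scipy.optimize.fsolve(alg_res, x0=1.0, args=(y0,))[0]
yp0 = -y0                           # differential residual gives dy0/dt directly
print(y1, yp0)                      # 0.75 -0.25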
1111  def _reduce_times(yout, tout, times):
1112      jj = 0
1113      maxjj = len(tout)
1114      for ii, twanted in enumerate(times):
1115          while (tout[jj] != twanted) & (jj < maxjj-1):
1116              jj += 1
1117          yout[ii] = yout[jj]
1118  
1119      yout = yout[:len(times)]
1120  
1121      return yout
1122  
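_reduce_times keeps only the rows of the dense integrator output whose times match the requested output times. A plain-numpy illustration of the same row-matching idea:

# Plain-numpy illustration of down-selecting dense output to requested times.
import numpy as np

tout = [0.0, 0.5, 1.0, 1.5, 2.0]                # times the integrator produced
yout = np.array([[0.0], [5.0], [10.0], [15.0], [20.0]])
times = [0.0, 1.0, 2.0]                         # times the caller asked for
reduced = yout[[tout.index(t) for t in times]]
print(reduced.ravel())                          # [ 0. 10. 20.]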