Package madgraph :: Package interface :: Module amcatnlo_run_interface
[hide private]
[frames] | [no frames]

Source Code for Module madgraph.interface.amcatnlo_run_interface

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """A user friendly command line interface to access MadGraph5_aMC@NLO features. 
  16     Uses the cmd package for command interpretation and tab completion. 
  17  """ 
  18  from __future__ import division 
  19   
  20  import atexit 
  21  import glob 
  22  import logging 
  23  import math 
  24  import optparse 
  25  import os 
  26  import pydoc 
  27  import random 
  28  import re 
  29  import shutil 
  30  import subprocess 
  31  import sys 
  32  import traceback 
  33  import time 
  34  import signal 
  35  import tarfile 
  36  import copy 
  37  import datetime 
  38  import tarfile 
  39  import traceback 
  40  import StringIO 
  41  try: 
  42      import cpickle as pickle 
  43  except: 
  44      import pickle 
  45   
  46  try: 
  47      import readline 
  48      GNU_SPLITTING = ('GNU' in readline.__doc__) 
  49  except: 
  50      GNU_SPLITTING = True 
  51   
  52  root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0] 
  53  root_path = os.path.split(root_path)[0] 
  54  sys.path.insert(0, os.path.join(root_path,'bin')) 
  55   
  56  # usefull shortcut 
  57  pjoin = os.path.join 
  58  # Special logger for the Cmd Interface 
  59  logger = logging.getLogger('madgraph.stdout') # -> stdout 
  60  logger_stderr = logging.getLogger('madgraph.stderr') # ->stderr 
  61    
  62  try: 
  63      import madgraph 
  64  except ImportError:  
  65      aMCatNLO = True  
  66      import internal.extended_cmd as cmd 
  67      import internal.common_run_interface as common_run 
  68      import internal.banner as banner_mod 
  69      import internal.misc as misc     
  70      from internal import InvalidCmd, MadGraph5Error 
  71      import internal.files as files 
  72      import internal.cluster as cluster 
  73      import internal.save_load_object as save_load_object 
  74      import internal.gen_crossxhtml as gen_crossxhtml 
  75      import internal.sum_html as sum_html 
  76      import internal.shower_card as shower_card 
  77      import internal.FO_analyse_card as analyse_card  
  78      import internal.lhe_parser as lhe_parser 
  79  else: 
  80      # import from madgraph directory 
  81      aMCatNLO = False 
  82      import madgraph.interface.extended_cmd as cmd 
  83      import madgraph.interface.common_run_interface as common_run 
  84      import madgraph.iolibs.files as files 
  85      import madgraph.iolibs.save_load_object as save_load_object 
  86      import madgraph.madevent.gen_crossxhtml as gen_crossxhtml 
  87      import madgraph.madevent.sum_html as sum_html 
  88      import madgraph.various.banner as banner_mod 
  89      import madgraph.various.cluster as cluster 
  90      import madgraph.various.misc as misc 
  91      import madgraph.various.shower_card as shower_card 
  92      import madgraph.various.FO_analyse_card as analyse_card 
  93      import madgraph.various.lhe_parser as lhe_parser 
  94      from madgraph import InvalidCmd, aMCatNLOError, MadGraph5Error,MG5DIR 
class aMCatNLOError(Exception):
    """Generic error raised by the aMC@NLO run interface."""
98
def compile_dir(*arguments):
    """Compile the directory p_dir.

    ``arguments`` is the tuple (me_dir, p_dir, mode, options, tests, exe,
    run_mode), passed either unpacked or as one tuple.  This needs to be a
    module-level function (not a class method) so it can be dispatched to
    worker processes for multicore compilation.

    Returns 0 on success, or the error message when a MadGraph5Error was
    raised during compilation.
    """
    if len(arguments) == 1:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments[0]
    elif len(arguments) == 7:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments
    else:
        # py3-compatible raise (original used the removed "raise E, msg" form)
        raise aMCatNLOError('not correct number of argument')
    logger.info(' Compiling %s...' % p_dir)

    this_dir = pjoin(me_dir, 'SubProcesses', p_dir)

    try:
        # compile and run the requested sanity tests
        for test in tests:
            # skip check_poles for LOonly dirs
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'parton_lum_0.f')):
                continue
            if test in ('test_ME', 'test_MC'):
                test_exe = 'test_soft_col_limits'
            else:
                test_exe = test
            misc.compile([test_exe], cwd=this_dir, job_specs=False)
            # 'input' renamed (it shadowed the builtin); files are now
            # closed deterministically via context managers instead of
            # relying on garbage collection.
            input_path = pjoin(me_dir, '%s_input.txt' % test)
            # this can be improved/better written to handle the output
            with open(input_path) as test_in, \
                 open(pjoin(this_dir, '%s.log' % test), 'w') as test_out:
                misc.call(['./%s' % test_exe], cwd=this_dir,
                          stdin=test_in, stdout=test_out, close_fds=True)
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'MadLoop5_resources')):
                # archive the MadLoop resources; ensure the tar file is
                # closed even if add() fails
                tf = tarfile.open(pjoin(this_dir, 'MadLoop5_resources.tar.gz'), 'w:gz',
                                  dereference=True)
                try:
                    tf.add(pjoin(this_dir, 'MadLoop5_resources'),
                           arcname='MadLoop5_resources')
                finally:
                    tf.close()

        if not options['reweightonly']:
            misc.compile(['gensym'], cwd=this_dir, job_specs=False)
            with open(pjoin(this_dir, 'gensym.log'), 'w') as gensym_log:
                misc.call(['./gensym'], cwd=this_dir, stdout=gensym_log,
                          close_fds=True)
            # compile madevent_mintMC/mintFO
            misc.compile([exe], cwd=this_dir, job_specs=False)
            if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
                misc.compile(['reweight_xsec_events'], cwd=this_dir, job_specs=False)

        logger.info(' %s done.' % p_dir)
        return 0
    except MadGraph5Error as msg:
        return msg
def check_compiler(options, block=False):
    """Check that the current fortran compiler is gfortran 4.6 or later.
    If block, stop the execution, otherwise just print a warning.

    options: the run options dict (reads 'fortran_compiler').
    Raises aMCatNLOError when block is True and the check fails.
    """

    msg = 'In order to be able to run at NLO MadGraph5_aMC@NLO, you need to have ' + \
          'gfortran 4.6 or later installed.\n%s has been detected\n' + \
          'Note that You can still run all MadEvent run without any problem!'
    # first check that gfortran is installed
    if options['fortran_compiler']:
        compiler = options['fortran_compiler']
    elif misc.which('gfortran'):
        compiler = 'gfortran'
    else:
        compiler = ''

    if 'gfortran' not in compiler:
        if block:
            raise aMCatNLOError(msg % compiler)
        else:
            logger.warning(msg % compiler)
    else:
        curr_version = misc.get_gfortran_version(compiler)
        # keep the original string for messages; split a copy for the
        # numeric comparison.
        version_parts = curr_version.split('.')
        if len(version_parts) == 1:
            version_parts.append('0')

        # accept gfortran >= 5.x, or gfortran 4.6 and later
        if int(version_parts[0]) < 5:
            if int(version_parts[0]) == 4 and int(version_parts[1]) > 5:
                return
            # BUGFIX: the original concatenated the compiler string with
            # the *list* of version components ("compiler + ' ' +
            # curr_version" after curr_version had been re-bound to a
            # list), which raised TypeError instead of producing the
            # intended error/warning message.
            if block:
                raise aMCatNLOError(msg % (compiler + ' ' + curr_version))
            else:
                logger.warning(msg % (compiler + ' ' + curr_version))
#===============================================================================
# CmdExtended
#===============================================================================
class CmdExtended(common_run.CommonRunCmd):
    """Particularisation of the cmd command for aMCatNLO"""

    # suggested list of command
    next_possibility = {
        'start': [],
    }

    debug_output = 'ME5_debug'
    error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n'
    error_debug += 'More information is found in \'%(debug)s\'.\n'
    error_debug += 'Please attach this file to your report.'

    config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n'

    keyboard_stop_msg = """stopping all operation
            in order to quit MadGraph5_aMC@NLO please enter exit"""

    # Define the Error
    InvalidCmd = InvalidCmd
    ConfigurationError = aMCatNLOError

    def __init__(self, me_dir, options, *arg, **opt):
        """Init history and line continuation"""

        # Tag allowing/forbiding question
        self.force = False

        # If possible, build an info line with current version number
        # and date, from the VERSION text file
        info = misc.get_pkg_info()
        info_line = ""
        # dict.has_key() is obsolete (removed in python3); 'in' is equivalent
        if info and 'version' in info and 'date' in info:
            len_version = len(info['version'])
            len_date = len(info['date'])
            if len_version + len_date < 30:
                info_line = "#*         VERSION %s %s %s         *\n" % \
                            (info['version'],
                            (30 - len_version - len_date) * ' ',
                            info['date'])
        else:
            version = open(pjoin(root_path, 'MGMEVersion.txt')).readline().strip()
            info_line = "#*         VERSION %s %s                *\n" % \
                            (version, (24 - len(version)) * ' ')

        # Create a header for the history file.
        # Remember to fill in time at writeout time!
        self.history_header = \
        '#************************************************************\n' + \
        '#*                  MadGraph5_aMC@NLO                       *\n' + \
        '#*                                                          *\n' + \
        "#*                *                       *                 *\n" + \
        "#*                  *        * *        *                   *\n" + \
        "#*                    * * * * 5 * * * *                     *\n" + \
        "#*                  *        * *        *                   *\n" + \
        "#*                *                       *                 *\n" + \
        "#*                                                          *\n" + \
        "#*                                                          *\n" + \
        info_line + \
        "#*                                                          *\n" + \
        "#*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n" + \
        "#*    https://server06.fynu.ucl.ac.be/projects/madgraph     *\n" + \
        "#*                          and                             *\n" + \
        "#*                http://amcatnlo.cern.ch                   *\n" + \
        '#*                                                          *\n' + \
        '#************************************************************\n' + \
        '#*                                                          *\n' + \
        '#*               Command File for aMCatNLO                  *\n' + \
        '#*                                                          *\n' + \
        '#*     run as ./bin/aMCatNLO.py filename                    *\n' + \
        '#*                                                          *\n' + \
        '#************************************************************\n'

        if info_line:
            info_line = info_line[1:]

        # NOTE(review): banner spacing below reconstructed from a
        # whitespace-mangled rendering; cosmetic alignment may differ
        # from the original by a few columns.
        logger.info(\
        "************************************************************\n" + \
        "*                                                          *\n" + \
        "*           W E L C O M E  to  M A D G R A P H 5           *\n" + \
        "*                      a M C @ N L O                       *\n" + \
        "*                                                          *\n" + \
        "*                 *                       *                *\n" + \
        "*                   *        * *        *                  *\n" + \
        "*                     * * * * 5 * * * *                    *\n" + \
        "*                   *        * *        *                  *\n" + \
        "*                 *                       *                *\n" + \
        "*                                                          *\n" + \
        info_line + \
        "*                                                          *\n" + \
        "*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n" + \
        "*                 http://amcatnlo.cern.ch                  *\n" + \
        "*                                                          *\n" + \
        "*               Type 'help' for in-line help.              *\n" + \
        "*                                                          *\n" + \
        "************************************************************")
        super(CmdExtended, self).__init__(me_dir, options, *arg, **opt)

    def get_history_header(self):
        """return the history header"""
        return self.history_header % misc.get_time_info()

    def stop_on_keyboard_stop(self):
        """action to perform to close nicely on a keyboard interupt"""
        # Best-effort cleanup: never raise out of the interrupt handler.
        # (The former bare 'except:' is narrowed to Exception so a second
        # KeyboardInterrupt can still get through.)
        try:
            if hasattr(self, 'cluster'):
                logger.info('rm jobs on queue')
                self.cluster.remove()
            if hasattr(self, 'results'):
                self.update_status('Stop by the user', level=None, makehtml=True, error=True)
                self.add_error_log_in_html(KeyboardInterrupt)
        except Exception:
            pass

    def postcmd(self, stop, line):
        """Update the status of the run for finishing interactive command"""

        # relaxing the tag forbidding question
        self.force = False

        if not self.use_rawinput:
            return stop

        arg = line.split()
        if len(arg) == 0:
            return stop
        elif str(arg[0]) in ['exit', 'quit', 'EOF']:
            return stop

        try:
            self.update_status('Command \'%s\' done.<br> Waiting for instruction.' % arg[0],
                               level=None, error=True)
        except Exception:
            # status update is best-effort; log and carry on
            misc.sprint('self.update_status fails', log=logger)
        # NOTE(review): the original falls through returning None here
        # (not 'stop'); kept as-is to preserve the command-loop behaviour.

    def nice_user_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""
        self.add_error_log_in_html()
        cmd.Cmd.nice_user_error(self, error, line)

    def nice_config_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""
        self.add_error_log_in_html()
        cmd.Cmd.nice_config_error(self, error, line)

    def nice_error_handling(self, error, line):
        """If a ME run is currently running add a link in the html output"""
        self.add_error_log_in_html()
        cmd.Cmd.nice_error_handling(self, error, line)
#===============================================================================
# HelpToCmd
#===============================================================================
class HelpToCmd(object):
    """The Series of help routine for the aMCatNLOCmd"""

    def help_launch(self):
        """help for launch command"""
        _launch_parser.print_help()

    def help_banner_run(self):
        """help for the banner_run command"""
        logger.info("syntax: banner_run Path|RUN [--run_options]")
        logger.info("-- Reproduce a run following a given banner")
        logger.info("   One of the following argument is require:")
        logger.info("   Path should be the path of a valid banner.")
        logger.info("   RUN should be the name of a run of the current directory")
        self.run_options_help([('-f', 'answer all question by default'),
                               ('--name=X', 'Define the name associated with the new run')])

    def help_compile(self):
        """help for compile command"""
        _compile_parser.print_help()

    def help_generate_events(self):
        """help for generate_events command (same options as launch)"""
        _generate_events_parser.print_help()

    def help_calculate_xsect(self):
        """help for calculate_xsect command"""
        _calculate_xsect_parser.print_help()

    def help_shower(self):
        """help for shower command"""
        _shower_parser.print_help()

    def help_open(self):
        """help for the open command"""
        logger.info("syntax: open FILE  ")
        logger.info("-- open a file with the appropriate editor.")
        logger.info('   If FILE belongs to index.html, param_card.dat, run_card.dat')
        logger.info('   the path to the last created/used directory is used')

    def run_options_help(self, data):
        """print the command-specific options in 'data', followed by the
        session-wide run options shared by every run command."""
        if data:
            logger.info('-- local options:')
            for name, info in data:
                logger.info('      %s : %s' % (name, info))

        logger.info("-- session options:")
        logger.info("      Note that those options will be kept for the current session")
        logger.info("      --cluster : Submit to the  cluster. Current cluster: %s" % self.options['cluster_type'])
        logger.info("      --multicore : Run in multi-core configuration")
        logger.info("      --nb_core=X : limit the number of core to use to X.")
#===============================================================================
# CheckValidForCmd
#===============================================================================
class CheckValidForCmd(object):
    """The Series of check routine for the aMCatNLOCmd.

    All py2-only 'raise E, msg' statements have been converted to the
    py2.6+/py3-compatible 'raise E(msg)' form."""

    def check_shower(self, args, options):
        """Check the validity of the line. args[0] is the run_directory"""

        if options['force']:
            self.force = True

        if len(args) == 0:
            self.help_shower()
            raise self.InvalidCmd('Invalid syntax, please specify the run name')
        if not os.path.isdir(pjoin(self.me_dir, 'Events', args[0])):
            raise self.InvalidCmd('Directory %s does not exists' % \
                            pjoin(os.getcwd(), 'Events', args[0]))

        self.set_run_name(args[0], level='shower')
        args[0] = pjoin(self.me_dir, 'Events', args[0])

    def check_plot(self, args):
        """Check the argument for the plot command
        plot run_name modes"""

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir or not td:
            logger.info('Retry to read configuration file to find madanalysis/td')
            self.set_configuration()

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir:
            error_msg = 'No Madanalysis path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)
        if not td:
            error_msg = 'No path to td directory correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        if len(args) == 0:
            if not hasattr(self, 'run_name') or not self.run_name:
                self.help_plot()
                raise self.InvalidCmd('No run name currently define. Please add this information.')
            args.append('all')
            return

        if args[0] not in self._plot_mode:
            self.set_run_name(args[0], level='plot')
            del args[0]
            if len(args) == 0:
                args.append('all')
        elif not self.run_name:
            self.help_plot()
            raise self.InvalidCmd('No run name currently define. Please add this information.')

        for arg in args:
            if arg not in self._plot_mode and arg != self.run_name:
                self.help_plot()
                raise self.InvalidCmd('unknown options %s' % arg)

    def check_pgs(self, arg):
        """Check the argument for pythia command
        syntax: pgs [NAME]
        Note that other option are already remove at this point
        """

        # If not pythia-pgs path
        if not self.options['pythia-pgs_path']:
            logger.info('Retry to read configuration file to find pythia-pgs path')
            self.set_configuration()

        if not self.options['pythia-pgs_path'] or not \
            os.path.exists(pjoin(self.options['pythia-pgs_path'], 'src')):
            error_msg = 'No pythia-pgs path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name currently define. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file file pythia_events.hep currently available
            Please specify a valid run_name''')

        lock = None
        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'pgs')
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name))

            if not filenames:
                raise self.InvalidCmd('No events file corresponding to %s run with tag %s. ' % (self.run_name, prev_tag))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                # decompress asynchronously; the returned lock lets the
                # caller wait for completion
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'pgs')

        return lock

    def check_delphes(self, arg):
        """Check the argument for pythia command
        syntax: delphes [NAME]
        Note that other option are already remove at this point
        """

        # If not delphes path
        if not self.options['delphes_path']:
            logger.info('Retry to read configuration file to find delphes path')
            self.set_configuration()

        if not self.options['delphes_path']:
            error_msg = 'No delphes path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name currently define. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file file pythia_events.hep currently available
            Please specify a valid run_name''')

        lock = None
        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'delphes')
            # NOTE(review): unlike check_pgs this searches Events/ directly
            # (not Events/<run_name>/); kept as-is — confirm intended.
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events'))

            if not filenames:
                raise self.InvalidCmd('No events file corresponding to %s run with tag %s.:%s ' \
                    % (self.run_name, prev_tag,
                       pjoin(self.me_dir, 'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag)))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'delphes')

        # CONSISTENCY FIX: the original built 'lock' but never returned it
        # (check_pgs does); callers ignoring the return value are unaffected.
        return lock

    def check_calculate_xsect(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_calculate_xsect()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                # BUGFIX: the original interpolated args[1], which does not
                # exist when len(args) == 1 (IndexError masked the message)
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                                  ' are not compatible. Please choose one.')

    def check_generate_events(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_generate_events()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                # BUGFIX: same args[1] IndexError as in check_calculate_xsect
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                                  ' are not compatible. Please choose one.')

    def check_banner_run(self, args):
        """check the validity of line"""

        if len(args) == 0:
            self.help_banner_run()
            raise self.InvalidCmd('banner_run requires at least one argument.')

        tag = [a[6:] for a in args if a.startswith('--tag=')]

        if os.path.exists(args[0]):
            type = 'banner'
            format = self.detect_card_type(args[0])
            if format != 'banner':
                raise self.InvalidCmd('The file is not a valid banner.')
        elif tag:
            # BUGFIX: 'tag' is a list here; the original interpolated the
            # list itself into the file name ("name_['t']_banner.txt")
            args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' % \
                            (args[0], tag[0]))
            if not os.path.exists(args[0]):
                raise self.InvalidCmd('No banner associates to this name and tag.')
        else:
            name = args[0]
            type = 'run'
            banners = misc.glob('*_banner.txt', pjoin(self.me_dir, 'Events', args[0]))
            if not banners:
                raise self.InvalidCmd('No banner associates to this name.')
            elif len(banners) == 1:
                args[0] = banners[0]
            else:
                # list the tag and propose those to the user
                tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners]
                tag = self.ask('which tag do you want to use?', tags[0], tags)
                args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' % \
                                (args[0], tag))

        run_name = [arg[7:] for arg in args if arg.startswith('--name=')]
        if run_name:
            try:
                # BUGFIX: run_name is a list; interpolate its element, not
                # the list repr
                self.exec_cmd('remove %s all banner -f' % run_name[0])
            except Exception:
                pass
            self.set_run_name(args[0], tag=None, level='parton', reload_card=True)
        elif type == 'banner':
            self.set_run_name(self.find_available_run_name(self.me_dir))
        elif type == 'run':
            if not self.results[name].is_empty():
                run_name = self.find_available_run_name(self.me_dir)
                logger.info('Run %s is not empty so will use run_name: %s' % \
                            (name, run_name))
                self.set_run_name(run_name)
            else:
                try:
                    # BUGFIX: here run_name is the (empty) --name list; the
                    # run to remove is clearly the existing empty run 'name'
                    self.exec_cmd('remove %s all banner -f' % name)
                except Exception:
                    pass
                self.set_run_name(name)

    def check_launch(self, args, options):
        """check the validity of the line. args is MODE
        MODE being LO, NLO, aMC@NLO or aMC@LO. If no mode is passed, auto is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('auto')
            return

        if len(args) > 1:
            self.help_launch()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['LO', 'NLO', 'aMC@NLO', 'aMC@LO', 'auto']:
                raise self.InvalidCmd('%s is not a valid mode, please use "LO", "NLO", "aMC@NLO" or "aMC@LO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                                  ' are not compatible. Please choose one.')
        if mode == 'NLO' and options['reweightonly']:
            raise self.InvalidCmd('option -r (--reweightonly) needs mode "aMC@NLO" or "aMC@LO"')

    def check_compile(self, args, options):
        """check the validity of the line. args is MODE
        MODE being FO or MC. If no mode is passed, MC is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('MC')
            return

        if len(args) > 1:
            self.help_compile()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['MC', 'FO']:
                raise self.InvalidCmd('%s is not a valid mode, please use "FO" or "MC"' % args[0])
            mode = args[0]
#===============================================================================
# CompleteForCmd
#===============================================================================
class CompleteForCmd(CheckValidForCmd):
    """The Series of help routine for the MadGraphCmd"""

    def complete_launch(self, text, line, begidx, endidx):
        """auto-completion for launch command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['LO', 'NLO', 'aMC@NLO', 'aMC@LO'], line)
        elif len(args) == 2 and line[begidx-1] == '@':
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _launch_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_banner_run(self, text, line, begidx, endidx, formatting=True):
        "Complete the banner run command"
        try:
            args = self.split_arg(line[0:begidx], error=False)

            if args[-1].endswith(os.path.sep):
                return self.path_completion(text,
                                            os.path.join('.', *[a for a in args \
                                                if a.endswith(os.path.sep)]))

            if len(args) > 1:
                # only options are possible
                tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events', args[1]))
                tags = ['%s' % os.path.basename(t)[len(args[1])+1:-11] for t in tags]

                if args[-1] != '--tag=':
                    tags = ['--tag=%s' % t for t in tags]
                else:
                    return self.list_completion(text, tags)
                return self.list_completion(text, tags + ['--name=', '-f'], line)

            # First argument
            possibilites = {}

            comp = self.path_completion(text, os.path.join('.', *[a for a in args \
                                                if a.endswith(os.path.sep)]))
            if os.path.sep in line:
                return comp
            else:
                possibilites['Path from ./'] = comp

            run_list = misc.glob(pjoin('*', '*_banner.txt'), pjoin(self.me_dir, 'Events'))
            run_list = [n.rsplit('/', 2)[1] for n in run_list]
            possibilites['RUN Name'] = self.list_completion(text, run_list)

            return self.deal_multiple_categories(possibilites, formatting)

        except Exception as error:
            # completion must never raise; py2/py3-compatible print form
            print(error)

    def complete_compile(self, text, line, begidx, endidx):
        """auto-completion for compile command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['FO', 'MC'], line)
        else:
            opts = []
            for opt in _compile_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_calculate_xsect(self, text, line, begidx, endidx):
        """auto-completion for calculate_xsect command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _calculate_xsect_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_generate_events(self, text, line, begidx, endidx):
        """auto-completion for generate_events command
        just delegate to the launch completion"""
        # BUGFIX: the original dropped the result (implicit None), so
        # no completions were ever proposed for generate_events
        return self.complete_launch(text, line, begidx, endidx)

    def complete_shower(self, text, line, begidx, endidx):
        """auto-completion for shower command"""
        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return valid run_name
            # BUGFIX: the search path was passed as a third component to
            # pjoin instead of as the second argument of misc.glob, so the
            # glob never matched anything
            data = misc.glob(pjoin('*', 'events.lhe.gz'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

    def complete_plot(self, text, line, begidx, endidx):
        """Complete the plot command"""

        args = self.split_arg(line[0:begidx], error=False)

        if len(args) == 1:
            # return valid run_name
            # BUGFIX: same misplaced parenthesis as complete_shower
            data = misc.glob(pjoin('*', 'events.lhe*'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

        if len(args) > 1:
            return self.list_completion(text, self._plot_mode)

    def complete_pgs(self, text, line, begidx, endidx):
        "Complete the pgs command"
        args = self.split_arg(line[0:begidx], error=False)
        if len(args) == 1:
            # return valid run_name
            data = misc.glob(pjoin('*', 'events_*.hep.gz'),
                             pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1
            else:
                tmp2 = self.list_completion(text, self._run_options + ['-f',
                                            '--tag=', '--no_default'], line)
                return tmp1 + tmp2
        else:
            return self.list_completion(text, self._run_options + ['-f',
                                        '--tag=', '--no_default'], line)

    complete_delphes = complete_pgs
912
class aMCatNLOAlreadyRunning(InvalidCmd):
    """Raised when an aMC@NLO run is attempted while another one is
    already in progress in the same directory."""
915
class AskRunNLO(cmd.ControlSwitch):
    """Interactive switch-board asking the user how to run aMC@NLO:
    perturbative order, fixed-order vs. event generation, parton shower,
    MadSpin decays, reweighting and MadAnalysis5."""

    # (switch_name, description) pairs, in the order shown to the user
    to_control = [('order', 'Type of perturbative computation'),
                  ('fixed_order', 'No MC@[N]LO matching / event generation'),
                  ('shower', 'Shower the generated events'),
                  ('madspin', 'Decay onshell particles'),
                  ('reweight', 'Add weights to events for new hypp.'),
                  ('madanalysis', 'Run MadAnalysis5 on the events generated')]

    # 'onlyshower' also terminates the question loop
    quit_on = cmd.ControlSwitch.quit_on + ['onlyshower']

    def __init__(self, question, line_args=[], mode=None, force=False,
                 *args, **opt):
        """Pull run information out of the mother interface and initialise
        the underlying ControlSwitch (which drives the interactive loop)."""

        self.me_dir = opt['mother_interface'].me_dir
        self.check_available_module(opt['mother_interface'].options)
        self.last_mode = opt['mother_interface'].last_mode
        self.proc_characteristics = opt['mother_interface'].proc_characteristics
        self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat'),
                                           consistency='warning')
        super(AskRunNLO, self).__init__(self.to_control, opt['mother_interface'],
                                        *args, **opt)

    @property
    def answer(self):
        """Return the switch dictionary, normalising the shower entry and
        adding the derived boolean 'runshower'."""

        out = super(AskRunNLO, self).answer
        # HERWIG7 is handled through the HERWIGPP machinery
        if out['shower'] == 'HERWIG7':
            out['shower'] = 'HERWIGPP'

        if out['shower'] not in self.get_allowed('shower') or out['shower'] == 'OFF':
            out['runshower'] = False
        else:
            out['runshower'] = True
        return out

    def check_available_module(self, options):
        """Detect which optional modules (MA5, MadSpin, reweight, PY8, HW7,
        StdHEP) are usable with the current configuration options."""

        self.available_module = set()
        if options['madanalysis5_path']:
            self.available_module.add('MA5')
        if not aMCatNLO or ('mg5_path' in options and options['mg5_path']):
            self.available_module.add('MadSpin')
            # NOTE(review): reweighting is only offered together with an MG5
            # installation (it needs f2py as well) — confirm the nesting.
            if misc.has_f2py() or options['f2py_compiler']:
                self.available_module.add('reweight')
        if options['pythia8_path']:
            self.available_module.add('PY8')
        if options['hwpp_path'] and options['thepeg_path'] and options['hepmc_path']:
            self.available_module.add('HW7')

        MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib')
        if os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))):
            self.available_module.add('StdHEP')

    #
    # shortcut answers
    #
    def ans_lo(self, value):
        """function called if the user types lo=value, or lo (then value is None)"""

        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'ON'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: lo=%s' % value)

    def ans_nlo(self, value):
        """shortcut: fixed-order NLO computation"""
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'ON'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: nlo=%s' % value)

    def ans_amc__at__nlo(self, value):
        """shortcut: NLO matched to parton shower (aMC@NLO)"""
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'ON')
        else:
            logger.warning('Invalid command: aMC@NLO=%s' % value)

    def ans_amc__at__lo(self, value):
        """shortcut: LO matched to parton shower (aMC@LO)"""
        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'ON')
        else:
            logger.warning('Invalid command: aMC@LO=%s' % value)

    def ans_noshower(self, value):
        """shortcut: NLO event generation without running the shower"""
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: noshower=%s' % value)

    def ans_onlyshower(self, value):
        """shortcut: only shower previously generated events"""
        if value is None:
            self.switch['mode'] = 'onlyshower'
            self.switch['madspin'] = 'OFF'
            self.switch['reweight'] = 'OFF'
        else:
            logger.warning('Invalid command: onlyshower=%s' % value)

    def ans_noshowerlo(self, value):
        """shortcut: LO event generation without running the shower"""
        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: noshowerlo=%s' % value)

    def ans_madanalysis5(self, value):
        """ shortcut madanalysis5 -> madanalysis """

        if value is None:
            return self.onecmd('madanalysis')
        else:
            self.set_switch('madanalysis', value)

    #
    # ORDER
    #
    def get_allowed_order(self):
        """valid values for the 'order' switch"""
        return ["LO", "NLO"]

    def set_default_order(self):
        """Default order: LO if the last run was an LO-type one, NLO otherwise."""

        # BUGFIX: the original assigned 'NLO' unconditionally (the 'LO' branch
        # was dead code) and tested the misspelled mode 'aMC@L0' (digit zero);
        # the rest of the file uses 'aMC@LO'. Both spellings are accepted here
        # for safety.
        if self.last_mode in ['LO', 'aMC@LO', 'aMC@L0', 'noshowerLO']:
            self.switch['order'] = 'LO'
        else:
            self.switch['order'] = 'NLO'

    def set_switch_off_order(self):
        # 'order' has no OFF state; nothing to do
        return

    #
    # Fix order
    #
    def get_allowed_fixed_order(self):
        """valid values for 'fixed_order'; decays (1->N) are fixed-order only"""
        if self.proc_characteristics['ninitial'] == 1:
            return ['ON']
        else:
            return ['ON', 'OFF']

    def set_default_fixed_order(self):
        """Default for 'fixed_order' from the last run mode / process type."""

        # BUGFIX: in the original the second test was an independent 'if', so
        # its else branch overwrote the value set from last_mode; use elif.
        if self.last_mode in ['LO', 'NLO']:
            self.switch['fixed_order'] = 'ON'
        elif self.proc_characteristics['ninitial'] == 1:
            self.switch['fixed_order'] = 'ON'
        else:
            self.switch['fixed_order'] = 'OFF'

    def color_for_fixed_order(self, switch_value):
        """display OFF (i.e. event generation) in green, ON in red"""

        if switch_value in ['OFF']:
            return self.green % switch_value
        else:
            return self.red % switch_value

    def color_for_shower(self, switch_value):
        """display any runnable shower choice in green, otherwise red"""

        if switch_value in ['ON']:
            return self.green % switch_value
        elif switch_value in self.get_allowed('shower'):
            return self.green % switch_value
        else:
            return self.red % switch_value

    def consistency_fixed_order_shower(self, vfix, vshower):
        """ consistency_XX_YY(val_XX, val_YY)
            -> XX is the new key set by the user to a new value val_XX
            -> YY is another key set by the user.
            -> return value should be None or "replace_YY"
        """

        # a fixed-order run cannot shower/decay/reweight events
        if vfix == 'ON' and vshower != 'OFF':
            return 'OFF'
        return None

    consistency_fixed_order_madspin = consistency_fixed_order_shower
    consistency_fixed_order_reweight = consistency_fixed_order_shower

    def consistency_fixed_order_madanalysis(self, vfix, vma5):
        """fixed-order runs produce no events for MadAnalysis5 to analyse"""

        if vfix == 'ON' and vma5 == 'ON':
            return 'OFF'
        return None

    def consistency_shower_fixed_order(self, vshower, vfix):
        """ consistency_XX_YY(val_XX, val_YY)
            -> XX is the new key set by the user to a new value val_XX
            -> YY is another key set by the user.
            -> return value should be None or "replace_YY"
        """

        # turning any event-level step on forces fixed_order to OFF
        if vshower != 'OFF' and vfix == 'ON':
            return 'OFF'
        return None

    consistency_madspin_fixed_order = consistency_shower_fixed_order
    consistency_reweight_fixed_order = consistency_shower_fixed_order
    consistency_madanalysis_fixed_order = consistency_shower_fixed_order

    #
    # Shower
    #
    def get_allowed_shower(self):
        """valid values for the 'shower' switch, cached in self.allowed_shower"""

        if hasattr(self, 'allowed_shower'):
            return self.allowed_shower

        # the internal shower scripts rely on the 'bc' command-line tool
        if not misc.which('bc'):
            return ['OFF']

        if self.proc_characteristics['ninitial'] == 1:
            self.allowed_shower = ['OFF']
            return ['OFF']
        else:
            if 'StdHEP' in self.available_module:
                allowed = ['HERWIG6', 'OFF', 'PYTHIA6Q', 'PYTHIA6PT', ]
            else:
                allowed = ['OFF']
            if 'PY8' in self.available_module:
                allowed.append('PYTHIA8')
            if 'HW7' in self.available_module:
                allowed.append('HERWIGPP')

            self.allowed_shower = allowed

            return allowed

    def check_value_shower(self, value):
        """validate a shower choice; returns True, a canonical name, or False"""

        if value.upper() in self.get_allowed_shower():
            return True
        if value.upper() in ['PYTHIA8', 'HERWIGPP']:
            return True
        if value.upper() == 'ON':
            # 'ON' means: use whatever the run_card asks for
            return self.run_card['parton_shower']
        if value.upper() in ['P8', 'PY8', 'PYTHIA_8']:
            return 'PYTHIA8'
        if value.upper() in ['PY6', 'P6', 'PY6PT', 'PYTHIA_6', 'PYTHIA_6PT', 'PYTHIA6PT', 'PYTHIA6_PT']:
            return 'PYTHIA6PT'
        if value.upper() in ['PY6Q', 'PYTHIA_6Q', 'PYTHIA6Q', 'PYTHIA6_Q']:
            return 'PYTHIA6Q'
        if value.upper() in ['HW7', 'HERWIG7']:
            return 'HERWIG7'
        if value.upper() in ['HW++', 'HWPP', 'HERWIG++']:
            return 'HERWIGPP'
        if value.upper() in ['HW6', 'HERWIG_6']:
            return 'HERWIG6'
        # explicit rejection instead of the original implicit None
        return False

    def set_default_shower(self):
        """Default for 'shower' from last mode, process type and cards."""

        if self.last_mode in ['LO', 'NLO', 'noshower', 'noshowerLO']:
            self.switch['shower'] = 'OFF'
            return

        if self.proc_characteristics['ninitial'] == 1:
            self.switch['shower'] = 'OFF'
            return

        if not misc.which('bc'):
            logger.warning('bc command not available. Forbids to run the shower. please install it if you want to run the shower. (sudo apt-get install bc)')
            self.switch['shower'] = 'OFF'
            return

        if os.path.exists(pjoin(self.me_dir, 'Cards', 'shower_card.dat')):
            # a shower card is present: propose the run_card's shower
            self.switch['shower'] = self.run_card['parton_shower']
            #self.switch['shower'] = 'ON'
            self.switch['fixed_order'] = "OFF"
        else:
            self.switch['shower'] = 'OFF'

    def consistency_shower_madanalysis(self, vshower, vma5):
        """ MA5 only possible with (N)LO+PS if shower is run"""

        if vshower == 'OFF' and vma5 == 'ON':
            return 'OFF'
        return None

    def consistency_madanalysis_shower(self, vma5, vshower):
        """turning MA5 on requires the shower to be switched on as well"""

        if vma5 == 'ON' and vshower == 'OFF':
            return 'ON'
        return None

    def get_cardcmd_for_shower(self, value):
        """ adapt run_card according to this setup. return list of cmd to run"""

        if value != 'OFF':
            return ['set parton_shower %s' % self.switch['shower']]
        return []

    #
    # madspin
    #
    def get_allowed_madspin(self):
        """valid values for the 'madspin' switch, cached"""

        if hasattr(self, 'allowed_madspin'):
            return self.allowed_madspin

        self.allowed_madspin = []

        if 'MadSpin' not in self.available_module:
            return self.allowed_madspin
        if self.proc_characteristics['ninitial'] == 1:
            # decay processes cannot be re-decayed
            self.available_module.remove('MadSpin')
            self.allowed_madspin = ['OFF']
            return self.allowed_madspin
        else:
            self.allowed_madspin = ['OFF', 'ON', 'onshell']
            return self.allowed_madspin

    def check_value_madspin(self, value):
        """handle alias and valid option not present in get_allowed_madspin
        remember that this mode should always be OFF for 1>N. (ON not in allowed value)"""

        if value.upper() in self.get_allowed_madspin():
            if value == value.upper():
                return True
            else:
                return value.upper()
        elif value.lower() in self.get_allowed_madspin():
            if value == value.lower():
                return True
            else:
                return value.lower()

        if 'MadSpin' not in self.available_module or \
                'ON' not in self.get_allowed_madspin():
            return False

        if value.lower() in ['madspin', 'full']:
            return 'full'
        elif value.lower() in ['none']:
            return 'none'
        # explicit rejection instead of the original implicit None
        return False

    def set_default_madspin(self):
        """default: ON when a madspin_card is already present"""

        if 'MadSpin' in self.available_module:
            if os.path.exists(pjoin(self.me_dir, 'Cards', 'madspin_card.dat')):
                self.switch['madspin'] = 'ON'
            else:
                self.switch['madspin'] = 'OFF'
        else:
            self.switch['madspin'] = 'Not Avail.'

    def get_cardcmd_for_madspin(self, value):
        """set some command to run before allowing the user to modify the cards."""

        if value == 'onshell':
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"]
        elif value in ['full', 'madspin']:
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode madspin"]
        elif value == 'none':
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"]
        else:
            return []

    #
    # reweight
    #
    def get_allowed_reweight(self):
        """set the valid (visible) options for reweight"""

        if hasattr(self, 'allowed_reweight'):
            return getattr(self, 'allowed_reweight')

        self.allowed_reweight = []
        if 'reweight' not in self.available_module:
            return self.allowed_reweight
        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('reweight')
            self.allowed_reweight.append('OFF')
            return self.allowed_reweight
        else:
            self.allowed_reweight = ['OFF', 'ON', 'NLO', 'NLO_TREE', 'LO']
            return self.allowed_reweight

    def set_default_reweight(self):
        """initialise the switch for reweight"""

        if 'reweight' in self.available_module:
            if os.path.exists(pjoin(self.me_dir, 'Cards', 'reweight_card.dat')):
                self.switch['reweight'] = 'ON'
            else:
                self.switch['reweight'] = 'OFF'
        else:
            self.switch['reweight'] = 'Not Avail.'

    def get_cardcmd_for_reweight(self, value):
        """ adapt run_card according to this setup. return list of cmd to run"""

        if value == 'LO':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode LO"]
        elif value == 'NLO':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO",
                    "set store_rwgt_info T"]
        elif value == 'NLO_TREE':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO_tree",
                    "set store_rwgt_info T"]
        return []

    #
    # MadAnalysis5
    #
    def get_allowed_madanalysis(self):
        """valid values for the 'madanalysis' switch, cached"""

        if hasattr(self, 'allowed_madanalysis'):
            return self.allowed_madanalysis

        self.allowed_madanalysis = []

        if 'MA5' not in self.available_module:
            return self.allowed_madanalysis

        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('MA5')
            self.allowed_madanalysis = ['OFF']
            return self.allowed_madanalysis
        else:
            self.allowed_madanalysis = ['OFF', 'ON']
            return self.allowed_madanalysis

    def set_default_madanalysis(self):
        """initialise the switch for madanalysis5"""

        if 'MA5' not in self.available_module:
            self.switch['madanalysis'] = 'Not Avail.'
        elif os.path.exists(pjoin(self.me_dir, 'Cards', 'madanalysis5_hadron_card.dat')):
            self.switch['madanalysis'] = 'ON'
        else:
            self.switch['madanalysis'] = 'OFF'

    def check_value_madanalysis(self, value):
        """check an entry is valid. return the valid entry in case of shortcut"""

        if value.upper() in self.get_allowed('madanalysis'):
            return True
        value = value.lower()
        if value == 'hadron':
            # BUGFIX: the original referenced the non-existent attribute
            # 'self.get_allowed_madanalysis5' (and never called it), which
            # raised AttributeError; use the real accessor instead.
            return 'ON' if 'ON' in self.get_allowed('madanalysis') else False
        else:
            return False
1374 1375 #=============================================================================== 1376 # aMCatNLOCmd 1377 #=============================================================================== 1378 -class aMCatNLOCmd(CmdExtended, HelpToCmd, CompleteForCmd, common_run.CommonRunCmd):
1379 """The command line processor of MadGraph""" 1380 1381 # Truth values 1382 true = ['T','.true.',True,'true'] 1383 # Options and formats available 1384 _run_options = ['--cluster','--multicore','--nb_core=','--nb_core=2', '-c', '-m'] 1385 _generate_options = ['-f', '--laststep=parton', '--laststep=pythia', '--laststep=pgs', '--laststep=delphes'] 1386 _calculate_decay_options = ['-f', '--accuracy=0.'] 1387 _set_options = ['stdout_level','fortran_compiler','cpp_compiler','timeout'] 1388 _plot_mode = ['all', 'parton','shower','pgs','delphes'] 1389 _clean_mode = _plot_mode + ['channel', 'banner'] 1390 _display_opts = ['run_name', 'options', 'variable'] 1391 # survey options, dict from name to type, default value, and help text 1392 # Variables to store object information 1393 web = False 1394 cluster_mode = 0 1395 queue = 'madgraph' 1396 nb_core = None 1397 make_opts_var = {} 1398 1399 next_possibility = { 1400 'start': ['generate_events [OPTIONS]', 'calculate_crossx [OPTIONS]', 'launch [OPTIONS]', 1401 'help generate_events'], 1402 'generate_events': ['generate_events [OPTIONS]', 'shower'], 1403 'launch': ['launch [OPTIONS]', 'shower'], 1404 'shower' : ['generate_events [OPTIONS]'] 1405 } 1406 1407 1408 ############################################################################
1409 - def __init__(self, me_dir = None, options = {}, *completekey, **stdin):
1410 """ add information to the cmd """ 1411 1412 self.start_time = 0 1413 CmdExtended.__init__(self, me_dir, options, *completekey, **stdin) 1414 #common_run.CommonRunCmd.__init__(self, me_dir, options) 1415 1416 self.mode = 'aMCatNLO' 1417 self.nb_core = 0 1418 self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) 1419 1420 1421 self.load_results_db() 1422 self.results.def_web_mode(self.web) 1423 # check that compiler is gfortran 4.6 or later if virtuals have been exported 1424 proc_card = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read() 1425 1426 if not '[real=QCD]' in proc_card: 1427 check_compiler(self.options, block=True)
1428 1429 1430 ############################################################################
1431 - def do_shower(self, line):
1432 """ run the shower on a given parton level file """ 1433 argss = self.split_arg(line) 1434 (options, argss) = _launch_parser.parse_args(argss) 1435 # check argument validity and normalise argument 1436 options = options.__dict__ 1437 options['reweightonly'] = False 1438 self.check_shower(argss, options) 1439 evt_file = pjoin(os.getcwd(), argss[0], 'events.lhe') 1440 self.ask_run_configuration('onlyshower', options) 1441 self.run_mcatnlo(evt_file, options) 1442 1443 self.update_status('', level='all', update_results=True)
1444 1445 ################################################################################
1446 - def do_plot(self, line):
1447 """Create the plot for a given run""" 1448 1449 # Since in principle, all plot are already done automaticaly 1450 args = self.split_arg(line) 1451 # Check argument's validity 1452 self.check_plot(args) 1453 logger.info('plot for run %s' % self.run_name) 1454 1455 if not self.force: 1456 self.ask_edit_cards([], args, plot=True) 1457 1458 if any([arg in ['parton'] for arg in args]): 1459 filename = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe') 1460 if os.path.exists(filename+'.gz'): 1461 misc.gunzip(filename) 1462 if os.path.exists(filename): 1463 logger.info('Found events.lhe file for run %s' % self.run_name) 1464 shutil.move(filename, pjoin(self.me_dir, 'Events', 'unweighted_events.lhe')) 1465 self.create_plot('parton') 1466 shutil.move(pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'), filename) 1467 misc.gzip(filename) 1468 1469 if any([arg in ['all','parton'] for arg in args]): 1470 filename = pjoin(self.me_dir, 'Events', self.run_name, 'MADatNLO.top') 1471 if os.path.exists(filename): 1472 logger.info('Found MADatNLO.top file for run %s' % \ 1473 self.run_name) 1474 output = pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') 1475 plot_dir = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton') 1476 1477 if not os.path.isdir(plot_dir): 1478 os.makedirs(plot_dir) 1479 top_file = pjoin(plot_dir, 'plots.top') 1480 files.cp(filename, top_file) 1481 madir = self.options['madanalysis_path'] 1482 tag = self.run_card['run_tag'] 1483 td = self.options['td_path'] 1484 misc.call(['%s/plot' % self.dirbin, madir, td], 1485 stdout = open(pjoin(plot_dir, 'plot.log'),'a'), 1486 stderr = subprocess.STDOUT, 1487 cwd=plot_dir) 1488 1489 misc.call(['%s/plot_page-pl' % self.dirbin, 1490 os.path.basename(plot_dir), 1491 'parton'], 1492 stdout = open(pjoin(plot_dir, 'plot.log'),'a'), 1493 stderr = subprocess.STDOUT, 1494 cwd=pjoin(self.me_dir, 'HTML', self.run_name)) 1495 shutil.move(pjoin(self.me_dir, 'HTML',self.run_name ,'plots.html'), 1496 
output) 1497 1498 os.remove(pjoin(self.me_dir, 'Events', 'plots.top')) 1499 1500 if any([arg in ['all','shower'] for arg in args]): 1501 filenames = misc.glob('events_*.lhe.gz', pjoin(self.me_dir, 'Events', self.run_name)) 1502 if len(filenames) != 1: 1503 filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name)) 1504 if len(filenames) != 1: 1505 logger.info('No shower level file found for run %s' % \ 1506 self.run_name) 1507 return 1508 filename = filenames[0] 1509 misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events','pythia_events.hep')) 1510 1511 if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pythia_card.dat')): 1512 if aMCatNLO and not self.options['mg5_path']: 1513 raise "plotting NLO HEP file needs MG5 utilities" 1514 1515 files.cp(pjoin(self.options['mg5_path'], 'Template','LO', 'Cards', 'pythia_card_default.dat'), 1516 pjoin(self.me_dir, 'Cards', 'pythia_card.dat')) 1517 self.run_hep2lhe() 1518 else: 1519 filename = filenames[0] 1520 misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events','pythia_events.hep')) 1521 1522 self.create_plot('shower') 1523 lhe_file_name = filename.replace('.hep.gz', '.lhe') 1524 shutil.move(pjoin(self.me_dir, 'Events','pythia_events.lhe'), 1525 lhe_file_name) 1526 misc.gzip(lhe_file_name) 1527 1528 if any([arg in ['all','pgs'] for arg in args]): 1529 filename = pjoin(self.me_dir, 'Events', self.run_name, 1530 '%s_pgs_events.lhco' % self.run_tag) 1531 if os.path.exists(filename+'.gz'): 1532 misc.gunzip(filename) 1533 if os.path.exists(filename): 1534 self.create_plot('PGS') 1535 misc.gzip(filename) 1536 else: 1537 logger.info('No valid files for pgs plot') 1538 1539 if any([arg in ['all','delphes'] for arg in args]): 1540 filename = pjoin(self.me_dir, 'Events', self.run_name, 1541 '%s_delphes_events.lhco' % self.run_tag) 1542 if os.path.exists(filename+'.gz'): 1543 misc.gunzip(filename) 1544 if os.path.exists(filename): 1545 #shutil.move(filename, pjoin(self.me_dir, 
'Events','delphes_events.lhco')) 1546 self.create_plot('Delphes') 1547 #shutil.move(pjoin(self.me_dir, 'Events','delphes_events.lhco'), filename) 1548 misc.gzip(filename) 1549 else: 1550 logger.info('No valid files for delphes plot')
1551 1552 1553 ############################################################################
1554 - def do_calculate_xsect(self, line):
1555 """Main commands: calculates LO/NLO cross-section, using madevent_mintFO 1556 this function wraps the do_launch one""" 1557 1558 self.start_time = time.time() 1559 argss = self.split_arg(line) 1560 # check argument validity and normalise argument 1561 (options, argss) = _calculate_xsect_parser.parse_args(argss) 1562 options = options.__dict__ 1563 options['reweightonly'] = False 1564 options['parton'] = True 1565 self.check_calculate_xsect(argss, options) 1566 self.do_launch(line, options, argss)
1567 1568 ############################################################################
1569 - def do_banner_run(self, line):
1570 """Make a run from the banner file""" 1571 1572 args = self.split_arg(line) 1573 #check the validity of the arguments 1574 self.check_banner_run(args) 1575 1576 # Remove previous cards 1577 for name in ['shower_card.dat', 'madspin_card.dat']: 1578 try: 1579 os.remove(pjoin(self.me_dir, 'Cards', name)) 1580 except Exception: 1581 pass 1582 1583 banner_mod.split_banner(args[0], self.me_dir, proc_card=False) 1584 1585 # Check if we want to modify the run 1586 if not self.force: 1587 ans = self.ask('Do you want to modify the Cards/Run Type?', 'n', ['y','n']) 1588 if ans == 'n': 1589 self.force = True 1590 1591 # Compute run mode: 1592 if self.force: 1593 mode_status = {'order': 'NLO', 'fixed_order': False, 'madspin':False, 'shower':True} 1594 banner = banner_mod.Banner(args[0]) 1595 for line in banner['run_settings']: 1596 if '=' in line: 1597 mode, value = [t.strip() for t in line.split('=')] 1598 mode_status[mode] = value 1599 else: 1600 mode_status = {} 1601 1602 # Call Generate events 1603 self.do_launch('-n %s %s' % (self.run_name, '-f' if self.force else ''), 1604 switch=mode_status)
1605 1606 ############################################################################
1607 - def do_generate_events(self, line):
1608 """Main commands: generate events 1609 this function just wraps the do_launch one""" 1610 self.do_launch(line)
1611 1612 1613 ############################################################################
1614 - def do_treatcards(self, line, amcatnlo=True,mode=''):
1615 """Advanced commands: this is for creating the correct run_card.inc from the nlo format""" 1616 #check if no 'Auto' are present in the file 1617 self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) 1618 1619 # propagate the FO_card entry FO_LHE_weight_ratio to the run_card. 1620 # this variable is system only in the run_card 1621 # can not be done in EditCard since this parameter is not written in the 1622 # run_card directly. 1623 if mode in ['LO', 'NLO']: 1624 name = 'fo_lhe_weight_ratio' 1625 FO_card = analyse_card.FOAnalyseCard(pjoin(self.me_dir,'Cards', 'FO_analyse_card.dat')) 1626 if name in FO_card: 1627 self.run_card.set(name, FO_card[name], user=False) 1628 name = 'fo_lhe_postprocessing' 1629 if name in FO_card: 1630 self.run_card.set(name, FO_card[name], user=False) 1631 1632 return super(aMCatNLOCmd,self).do_treatcards(line, amcatnlo)
1633 1634 ############################################################################
1635 - def set_configuration(self, amcatnlo=True, **opt):
1636 """assign all configuration variable from file 1637 loop over the different config file if config_file not define """ 1638 return super(aMCatNLOCmd,self).set_configuration(amcatnlo=amcatnlo, **opt)
1639 1640 ############################################################################
    def do_launch(self, line, options={}, argss=[], switch={}):
        """Main commands: launch the full chain
        options and args are relevant if the function is called from other
        functions, such as generate_events or calculate_xsect
        mode gives the list of switch needed for the computation (usefull for banner_run)
        """

        # Parse the command line only when not called programmatically
        # (generate_events / calculate_xsect pass options/argss directly).
        if not argss and not options:
            self.start_time = time.time()
            argss = self.split_arg(line)
            # check argument validity and normalise argument
            (options, argss) = _launch_parser.parse_args(argss)
            options = options.__dict__
            self.check_launch(argss, options)

        if 'run_name' in options.keys() and options['run_name']:
            self.run_name = options['run_name']
            # if a dir with the given run_name already exists
            # remove it and warn the user
            if os.path.isdir(pjoin(self.me_dir, 'Events', self.run_name)):
                logger.warning('Removing old run information in \n' +
                               pjoin(self.me_dir, 'Events', self.run_name))
                files.rm(pjoin(self.me_dir, 'Events', self.run_name))
                self.results.delete_run(self.run_name)
        else:
            self.run_name = ''  # will be set later

        # cluster_mode: 0 = single core, 1 = cluster, 2 = multicore
        if options['multicore']:
            self.cluster_mode = 2
        elif options['cluster']:
            self.cluster_mode = 1

        # Determine the run mode, either interactively or from the given switch
        if not switch:
            mode = argss[0]

            if mode in ['LO', 'NLO']:
                options['parton'] = True
            mode = self.ask_run_configuration(mode, options)
        else:
            mode = self.ask_run_configuration('auto', options, switch)

        self.results.add_detail('run_mode', mode)

        self.update_status('Starting run', level=None, update_results=True)

        if self.options['automatic_html_opening']:
            misc.open_file(os.path.join(self.me_dir, 'crossx.html'))
            self.options['automatic_html_opening'] = False

        # strip any '+...' suffix from the mode before compiling/running
        if '+' in mode:
            mode = mode.split('+')[0]
        self.compile(mode, options)
        evt_file = self.run(mode, options)

        # nevents == 0 means only the integration grids were prepared
        if self.run_card['nevents'] == 0 and not mode in ['LO', 'NLO']:
            logger.info('No event file generated: grids have been set-up with a '
                        'relative precision of %s' % self.run_card['req_acc'])
            return

        # Event-generation modes: run systematics / reweight / decay steps
        if not mode in ['LO', 'NLO']:
            assert evt_file == pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe'), '%s != %s' % (evt_file, pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'))

            if self.run_card['systematics_program'] == 'systematics':
                self.exec_cmd('systematics %s %s ' % (self.run_name, ' '.join(self.run_card['systematics_arguments'])))

            self.exec_cmd('reweight -from_cards', postcmd=False)
            self.exec_cmd('decay_events -from_cards', postcmd=False)
            evt_file = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')

        # Shower the events (and run MA5 on the showered sample) if requested
        if not mode in ['LO', 'NLO', 'noshower', 'noshowerLO'] \
                and not options['parton']:
            self.run_mcatnlo(evt_file, options)
            self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False)

        elif mode == 'noshower':
            logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical.
Please, shower the Les Houches events before using them for physics analyses.""")

        self.update_status('', level='all', update_results=True)
        # FxFx merging needs a PYTHIA8 shower to remove double counting
        if self.run_card['ickkw'] == 3 and \
                (mode in ['noshower'] or \
                 (('PYTHIA8' not in self.run_card['parton_shower'].upper()) and (mode in ['aMC@NLO']))):
            logger.warning("""You are running with FxFx merging enabled.
To be able to merge samples of various multiplicities without double counting,
you have to remove some events after showering 'by hand'.
Please read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""")

        self.store_result()
        #check if the param_card defines a scan.
        if self.param_card_iterator:
            cpath = pjoin(self.me_dir, 'Cards', 'param_card.dat')
            param_card_iterator = self.param_card_iterator
            self.param_card_iterator = []  #avoid to next generate go trough here
            param_card_iterator.store_entry(self.run_name, self.results.current['cross'],
                                            error=self.results.current['error'],
                                            param_card_path=cpath)
            orig_name = self.run_name
            #go trough the scal
            # re-launch once per scan point, suppressing desktop notifications
            with misc.TMP_variable(self, 'allow_notification_center', False):
                for i, card in enumerate(param_card_iterator):
                    card.write(cpath)
                    self.check_param_card(cpath, dependent=True)
                    if not options['force']:
                        options['force'] = True
                    if options['run_name']:
                        options['run_name'] = '%s_%s' % (orig_name, i + 1)
                    if not argss:
                        argss = [mode, "-f"]
                    elif argss[0] == "auto":
                        argss[0] = mode
                    # recursive launch for this scan point
                    self.do_launch("", options=options, argss=argss, switch=switch)
                    #self.exec_cmd("launch -f ",precmd=True, postcmd=True,errorhandling=False)
                    param_card_iterator.store_entry(self.run_name, self.results.current['cross'],
                                                    error=self.results.current['error'],
                                                    param_card_path=cpath)
            #restore original param_card
            param_card_iterator.write(pjoin(self.me_dir, 'Cards', 'param_card.dat'))
            name = misc.get_scan_name(orig_name, self.run_name)
            path = pjoin(self.me_dir, 'Events', 'scan_%s.txt' % name)
            logger.info("write all cross-section results in %s" % path, '$MG:BOLD')
            param_card_iterator.write_summary(path)

        if self.allow_notification_center:
            misc.apple_notify('Run %s finished' % os.path.basename(self.me_dir),
                              '%s: %s +- %s ' % (self.results.current['run_name'],
                                                 self.results.current['cross'],
                                                 self.results.current['error']))
1770 1771 1772 ############################################################################
1773 - def do_compile(self, line):
1774 """Advanced commands: just compile the executables """ 1775 argss = self.split_arg(line) 1776 # check argument validity and normalise argument 1777 (options, argss) = _compile_parser.parse_args(argss) 1778 options = options.__dict__ 1779 options['reweightonly'] = False 1780 options['nocompile'] = False 1781 self.check_compile(argss, options) 1782 1783 mode = {'FO': 'NLO', 'MC': 'aMC@NLO'}[argss[0]] 1784 self.ask_run_configuration(mode, options) 1785 self.compile(mode, options) 1786 1787 1788 self.update_status('', level='all', update_results=True)
1789 1790
1791 - def update_random_seed(self):
1792 """Update random number seed with the value from the run_card. 1793 If this is 0, update the number according to a fresh one""" 1794 iseed = self.run_card['iseed'] 1795 if iseed == 0: 1796 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit')) 1797 iseed = int(randinit.read()[2:]) + 1 1798 randinit.close() 1799 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'), 'w') 1800 randinit.write('r=%d' % iseed) 1801 randinit.close()
1802 1803
def run(self, mode, options):
    """runs aMC@NLO. Returns the name of the event file created.

    mode: one of 'LO'/'NLO' (fixed order) or
          'aMC@NLO'/'aMC@LO'/'noshower'/'noshowerLO' (event generation).
    options: dict of command-line options; 'only_generation' is filled
          in here if absent.
    For fixed-order runs nothing is returned; for event generation the
    return value is whatever reweight_and_collect_events returns.
    """
    logger.info('Starting run')

    if not 'only_generation' in options.keys():
        options['only_generation'] = False

    # for second step in applgrid mode, do only the event generation step
    if mode in ['LO', 'NLO'] and self.run_card['iappl'] == 2 and not options['only_generation']:
        options['only_generation'] = True
    self.get_characteristics(pjoin(self.me_dir, 'SubProcesses', 'proc_characteristics'))
    self.setup_cluster_or_multicore()
    self.update_random_seed()
    # find and keep track of all the jobs: glob patterns of the channel
    # directories, keyed by run mode
    folder_names = {'LO': ['born_G*'], 'NLO': ['all_G*'],
                    'aMC@LO': ['GB*'], 'aMC@NLO': ['GF*']}
    folder_names['noshower'] = folder_names['aMC@NLO']
    folder_names['noshowerLO'] = folder_names['aMC@LO']
    # one entry per non-empty line of subproc.mg (one per P* directory)
    # NOTE(review): this file handle is never explicitly closed
    p_dirs = [d for d in \
              open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d]
    #Clean previous results
    self.clean_previous_results(options,p_dirs,folder_names[mode])

    # human-readable status labels for the three MINT steps (index == mint_step)
    mcatnlo_status = ['Setting up grids', 'Computing upper envelope', 'Generating events']


    if options['reweightonly']:
        # skip the integration entirely; reuse existing results
        event_norm=self.run_card['event_norm']
        nevents=self.run_card['nevents']
        return self.reweight_and_collect_events(options, mode, nevents, event_norm)

    if mode in ['LO', 'NLO']:
        # this is for fixed order runs
        mode_dict = {'NLO': 'all', 'LO': 'born'}
        logger.info('Doing fixed order %s' % mode)
        req_acc = self.run_card['req_acc_FO']

        # Re-distribute the grids for the 2nd step of the applgrid
        # running
        if self.run_card['iappl'] == 2:
            self.applgrid_distribute(options,mode_dict[mode],p_dirs)

        # create a list of dictionaries "jobs_to_run" with all the
        # jobs that need to be run
        integration_step=-1
        jobs_to_run,jobs_to_collect,integration_step = self.create_jobs_to_run(options,p_dirs, \
                            req_acc,mode_dict[mode],integration_step,mode,fixed_order=True)
        self.prepare_directories(jobs_to_run,mode)

        # loop over the integration steps. After every step, check
        # if we have the required accuracy. If this is the case,
        # stop running, else do another step.
        while True:
            integration_step=integration_step+1
            self.run_all_jobs(jobs_to_run,integration_step)
            self.collect_log_files(jobs_to_run,integration_step)
            jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \
                            jobs_to_collect,integration_step,mode,mode_dict[mode])
            if not jobs_to_run:
                # there are no more jobs to run (jobs_to_run is empty)
                break
        # We are done.
        self.finalise_run_FO(folder_names[mode],jobs_to_collect)
        self.update_status('Run complete', level='parton', update_results=True)
        return

    elif mode in ['aMC@NLO','aMC@LO','noshower','noshowerLO']:
        if self.ninitial == 1:
            raise aMCatNLOError('Decay processes can only be run at fixed order.')
        mode_dict = {'aMC@NLO': 'all', 'aMC@LO': 'born',\
                     'noshower': 'all', 'noshowerLO': 'born'}
        shower = self.run_card['parton_shower'].upper()
        nevents = self.run_card['nevents']
        req_acc = self.run_card['req_acc']
        # sanity of the nevents/req_acc combination from the run_card
        if nevents == 0 and req_acc < 0 :
            raise aMCatNLOError('Cannot determine the required accuracy from the number '\
                                'of events, because 0 events requested. Please set '\
                                'the "req_acc" parameter in the run_card to a value '\
                                'between 0 and 1')
        elif req_acc >1 or req_acc == 0 :
            raise aMCatNLOError('Required accuracy ("req_acc" in the run_card) should '\
                                'be between larger than 0 and smaller than 1, '\
                                'or set to -1 for automatic determination. Current '\
                                'value is %f' % req_acc)
        # For more than 1M events, set req_acc to 0.001 (except when it was explicitly set in the run_card)
        elif req_acc < 0 and nevents > 1000000 :
            req_acc=0.001

        shower_list = ['HERWIG6', 'HERWIGPP', 'PYTHIA6Q', 'PYTHIA6PT', 'PYTHIA8']

        if not shower in shower_list:
            raise aMCatNLOError('%s is not a valid parton shower. '\
                                'Please use one of the following: %s' \
                                % (shower, ', '.join(shower_list)))

        # check that PYTHIA6PT is not used for processes with FSR
        if shower == 'PYTHIA6PT' and self.proc_characteristics['has_fsr']:
            raise aMCatNLOError('PYTHIA6PT does not support processes with FSR')

        if mode in ['aMC@NLO', 'aMC@LO']:
            logger.info('Doing %s matched to parton shower' % mode[4:])
        elif mode in ['noshower','noshowerLO']:
            logger.info('Generating events without running the shower.')
        elif options['only_generation']:
            logger.info('Generating events starting from existing results')

        jobs_to_run,jobs_to_collect,integration_step = self.create_jobs_to_run(options,p_dirs, \
                                req_acc,mode_dict[mode],1,mode,fixed_order=False)
        # Make sure to update all the jobs to be ready for the event generation step
        if options['only_generation']:
            jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \
                                jobs_to_collect,1,mode,mode_dict[mode],fixed_order=False)
        else:
            self.prepare_directories(jobs_to_run,mode,fixed_order=False)


        # Main loop over the three MINT generation steps:
        for mint_step, status in enumerate(mcatnlo_status):
            if options['only_generation'] and mint_step < 2:
                # restart: grids and envelope already exist, only generate
                continue
            self.update_status(status, level='parton')
            self.run_all_jobs(jobs_to_run,mint_step,fixed_order=False)
            self.collect_log_files(jobs_to_run,mint_step)
            jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \
                        jobs_to_collect,mint_step,mode,mode_dict[mode],fixed_order=False)
            if mint_step+1==2 and nevents==0:
                # integration-only run: no events requested, stop after the envelope
                self.print_summary(options,2,mode)
                return

        # Sanity check on the event files. If error the jobs are resubmitted
        self.check_event_files(jobs_to_collect)

        if self.cluster_mode == 1:
            #if cluster run, wait 10 sec so that event files are transferred back
            self.update_status(
                'Waiting while files are transferred back from the cluster nodes',
                level='parton')
            time.sleep(10)

        event_norm=self.run_card['event_norm']
        return self.reweight_and_collect_events(options, mode, nevents, event_norm)
1945
def create_jobs_to_run(self,options,p_dirs,req_acc,run_mode,\
                       integration_step,mode,fixed_order=True):
    """Creates a list of dictionaries with all the jobs to be run.

    Returns the triple (jobs_to_run, jobs_to_collect, integration_step):
    jobs_to_run are the jobs to execute next, jobs_to_collect is the
    complete list needed to assemble the final answer, and
    integration_step is the (possibly updated) current step.
    Raises aMCatNLOError when the run_card settings are inconsistent or
    when a saved job status cannot be restored.
    """
    jobs_to_run = []
    if not options['only_generation']:
        # Fresh, new run. Check all the P*/channels.txt files
        # (created by the 'gensym' executable) to set-up all the
        # jobs using the default inputs.
        npoints = self.run_card['npoints_FO_grid']
        niters = self.run_card['niters_FO_grid']
        for p_dir in p_dirs:
            try:
                with open(pjoin(self.me_dir,'SubProcesses',p_dir,'channels.txt')) as chan_file:
                    channels = chan_file.readline().split()
            except IOError:
                logger.warning('No integration channels found for contribution %s' % p_dir)
                continue
            if fixed_order:
                lch = len(channels)
                maxchannels = 20  # combine up to 20 channels in a single job
                if self.run_card['iappl'] != 0:
                    maxchannels = 1
                njobs = (int(lch/maxchannels)+1 if lch % maxchannels != 0
                         else int(lch/maxchannels))
                for nj in range(1, njobs+1):
                    job = {}
                    job['p_dir'] = p_dir
                    job['channel'] = str(nj)
                    # distribute the channels as evenly as possible over the jobs
                    job['nchans'] = (int(lch/njobs)+1 if nj <= lch % njobs else int(lch/njobs))
                    job['configs'] = ' '.join(channels[:job['nchans']])
                    del channels[:job['nchans']]
                    job['split'] = 0
                    if req_acc == -1:
                        # fixed number of iterations/points from the run_card
                        job['accuracy'] = 0
                        job['niters'] = niters
                        job['npoints'] = npoints
                    elif req_acc > 0:
                        # adaptive run: start from a loose per-job accuracy
                        job['accuracy'] = 0.05
                        job['niters'] = 6
                        job['npoints'] = -1
                    else:
                        raise aMCatNLOError('No consistent "req_acc_FO" set. Use a value '+
                                            'between 0 and 1 or set it equal to -1.')
                    job['mint_mode'] = 0
                    job['run_mode'] = run_mode
                    job['wgt_frac'] = 1.0
                    job['wgt_mult'] = 1.0
                    jobs_to_run.append(job)
                # every channel must have been assigned to exactly one job
                if channels:
                    raise aMCatNLOError('channels is not empty %s' % channels)
            else:
                # (N)LO+PS: one job per channel
                for channel in channels:
                    job = {}
                    job['p_dir'] = p_dir
                    job['channel'] = channel
                    job['split'] = 0
                    job['accuracy'] = 0.03
                    job['niters'] = 12
                    job['npoints'] = -1
                    job['mint_mode'] = 0
                    job['run_mode'] = run_mode
                    job['wgt_frac'] = 1.0
                    jobs_to_run.append(job)
        jobs_to_collect = copy.copy(jobs_to_run)  # These are all jobs
    else:
        # if options['only_generation'] is true, just read the current jobs from file
        try:
            with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'rb') as f:
                jobs_to_collect = pickle.load(f)
                for job in jobs_to_collect:
                    # remap the saved absolute paths onto the current
                    # process directory (the run may have been moved)
                    job['dirname'] = pjoin(self.me_dir,'SubProcesses',
                                           job['dirname'].rsplit('/SubProcesses/',1)[1])
                jobs_to_run = copy.copy(jobs_to_collect)
        # was a bare 'except:', which also swallowed KeyboardInterrupt
        # and SystemExit; only genuine errors should be converted here
        except Exception:
            raise aMCatNLOError('Cannot reconstruct saved job status in %s' % \
                                pjoin(self.me_dir,'SubProcesses','job_status.pkl'))
        # Update cross sections and determine which jobs to run next
        if fixed_order:
            jobs_to_run,jobs_to_collect = self.collect_the_results(options,req_acc,jobs_to_run,
                                              jobs_to_collect,integration_step,mode,run_mode)
            # Update the integration_step to make sure that nothing will be overwritten
            integration_step = 1
            for job in jobs_to_run:
                while os.path.exists(pjoin(job['dirname'],'res_%s.dat' % integration_step)):
                    integration_step = integration_step+1
            integration_step = integration_step-1
        else:
            self.append_the_results(jobs_to_collect,integration_step)
    return jobs_to_run,jobs_to_collect,integration_step
2033
def prepare_directories(self,jobs_to_run,mode,fixed_order=True):
    """Set-up the G* directories for running"""
    name_suffix = {'born': 'B', 'all': 'F'}
    for job in jobs_to_run:
        # directory name: <run_mode>_G<channel> for fixed order,
        # G<B|F><channel> otherwise; split jobs get a _<split> suffix
        if fixed_order:
            leaf = job['run_mode'] + '_G' + job['channel']
        else:
            leaf = 'G' + name_suffix[job['run_mode']] + job['channel']
        if job['split'] != 0:
            leaf = leaf + '_' + str(job['split'])
        dirname = pjoin(self.me_dir, 'SubProcesses', job['p_dir'], leaf)
        job['dirname'] = dirname
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        self.write_input_file(job, fixed_order)
        if job['split'] == 0:
            continue
        # split directories inherit the grids from their parent directory
        parent = job['dirname'].rsplit("_", 1)[0]
        if not fixed_order:
            # link (only if not already present)
            for grid_file in ['grid.MC_integer', 'mint_grids', 'res_1']:
                if not os.path.isfile(pjoin(job['dirname'], grid_file)):
                    files.ln(pjoin(parent, grid_file), job['dirname'])
        else:
            # copy unconditionally
            for grid_file in ['grid.MC_integer', 'mint_grids']:
                files.cp(pjoin(parent, grid_file), job['dirname'])
2066 2067
def write_input_file(self,job,fixed_order):
    """write the input file for the madevent_mint* executable in the appropriate directory"""
    if fixed_order:
        # keyword-style input for the fixed-order executable; all values
        # are substituted straight from the job dictionary
        content= \
"""NPOINTS = %(npoints)s
NITERATIONS = %(niters)s
ACCURACY = %(accuracy)s
ADAPT_GRID = 2
MULTICHANNEL = 1
SUM_HELICITY = 1
NCHANS = %(nchans)s
CHANNEL = %(configs)s
SPLIT = %(split)s
WGT_MULT= %(wgt_mult)s
RUN_MODE = %(run_mode)s
RESTART = %(mint_mode)s
""" \
          % job
    else:
        # positional-style input for the (N)LO+PS MINT executable; the
        # '!' trailers document each line for the Fortran reader
        content = \
"""-1 12 ! points, iterations
%(accuracy)s ! desired fractional accuracy
1 -0.1 ! alpha, beta for Gsoft
-1 -0.1 ! alpha, beta for Gazi
1 ! Suppress amplitude (0 no, 1 yes)?
1 ! Exact helicity sum (0 yes, n = number/event)?
%(channel)s ! Enter Configuration Number:
%(mint_mode)s ! MINT imode: 0 to set-up grids, 1 to perform integral, 2 generate events
1 1 1 ! if imode is 1: Folding parameters for xi_i, phi_i and y_ij
%(run_mode)s ! all, born, real, virt
""" \
          % job
    # job['dirname'] must have been set by prepare_directories beforehand
    with open(pjoin(job['dirname'], 'input_app.txt'), 'w') as input_file:
        input_file.write(content)
2102 2103
def run_all_jobs(self,jobs_to_run,integration_step,fixed_order=True):
    """Loops over the jobs_to_run and executes them using the function 'run_exe'"""
    suffix_map = {'born' :'B', 'all':'F'}
    if fixed_order:
        # status message only applies to fixed-order running
        if integration_step == 0:
            status = 'Setting up grids'
        else:
            status = 'Refining results, step %i' % integration_step
        self.update_status(status, level=None)
        run_type = "Fixed order integration step %s" % integration_step
    else:
        run_type = "MINT step %s" % integration_step
    self.ijob = 0
    self.njobs = len(jobs_to_run)
    executable = 'ajob1'
    for job in jobs_to_run:
        # the second argument is the run mode itself for fixed order,
        # and its single-letter suffix (B/F) otherwise
        if fixed_order:
            mode_arg = job['run_mode']
        else:
            mode_arg = suffix_map[job['run_mode']]
        arguments = [job['channel'], mode_arg,
                     str(job['split']), str(integration_step)]
        self.run_exe(executable, arguments, run_type,
                     cwd=pjoin(self.me_dir,'SubProcesses',job['p_dir']))

    if self.cluster_mode == 2:
        # small delay so that all jobs are actually launched before waiting
        time.sleep(1)
    self.wait_for_complete(run_type)
2132 2133
def collect_the_results(self,options,req_acc,jobs_to_run,jobs_to_collect,\
                        integration_step,mode,run_mode,fixed_order=True):
    """Collect the results, make HTML pages, print the summary and
       determine if there are more jobs to run. Returns the list
       of the jobs that still need to be run, as well as the
       complete list of jobs that need to be collected to get the
       final answer.
    """
    # Get the results of the current integration/MINT step
    self.append_the_results(jobs_to_run,integration_step)
    # totals and random seed, reused later by other methods
    self.cross_sect_dict = self.write_res_txt_file(jobs_to_collect,integration_step)
    # Update HTML pages
    if fixed_order:
        cross, error = self.make_make_all_html_results(folder_names=['%s*' % run_mode],
                                                       jobs=jobs_to_collect)
    else:
        name_suffix={'born' :'B' , 'all':'F'}
        cross, error = self.make_make_all_html_results(folder_names=['G%s*' % name_suffix[run_mode]])
    self.results.add_detail('cross', cross)
    self.results.add_detail('error', error)
    # Combine grids from split fixed order jobs (replaces the split jobs
    # in jobs_to_run by their re-combined parents)
    if fixed_order:
        jobs_to_run=self.combine_split_order_run(jobs_to_run)
    # Set-up jobs for the next iteration/MINT step
    jobs_to_run_new=self.update_jobs_to_run(req_acc,integration_step,jobs_to_run,fixed_order)
    # IF THERE ARE NO MORE JOBS, WE ARE DONE!!!
    if fixed_order:
        # Write the jobs_to_collect directory to file so that we
        # can restart them later (with only-generation option)
        with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'wb') as f:
            pickle.dump(jobs_to_collect,f)
    # Print summary
    if (not jobs_to_run_new) and fixed_order:
        # print final summary of results (for fixed order)
        scale_pdf_info=self.collect_scale_pdf_info(options,jobs_to_collect)
        self.print_summary(options,integration_step,mode,scale_pdf_info,done=True)
        return jobs_to_run_new,jobs_to_collect
    elif jobs_to_run_new:
        # print intermediate summary of results
        scale_pdf_info=[]
        self.print_summary(options,integration_step,mode,scale_pdf_info,done=False)
    else:
        # When we are done for (N)LO+PS runs, do not print
        # anything yet. This will be done after the reweighting
        # and collection of the events
        scale_pdf_info=[]
    # Prepare for the next integration/MINT step
    if (not fixed_order) and integration_step+1 == 2 :
        # Write the jobs_to_collect directory to file so that we
        # can restart them later (with only-generation option)
        with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'wb') as f:
            pickle.dump(jobs_to_collect,f)
        # next step is event generation (mint_step 2): split jobs per
        # requested events and write the book-keeping files
        jobs_to_run_new,jobs_to_collect_new= \
            self.check_the_need_to_split(jobs_to_run_new,jobs_to_collect)
        self.prepare_directories(jobs_to_run_new,mode,fixed_order)
        self.write_nevents_unweighted_file(jobs_to_collect_new,jobs_to_collect)
        self.write_nevts_files(jobs_to_run_new)
    else:
        # fixed-order adaptive running (no applgrid): split long jobs over
        # the available cores/cluster slots
        if fixed_order and self.run_card['iappl'] == 0 \
           and self.run_card['req_acc_FO'] > 0:
            jobs_to_run_new,jobs_to_collect= \
                self.split_jobs_fixed_order(jobs_to_run_new,jobs_to_collect)
            self.prepare_directories(jobs_to_run_new,mode,fixed_order)
        jobs_to_collect_new=jobs_to_collect
    return jobs_to_run_new,jobs_to_collect_new
2200 2201
def write_nevents_unweighted_file(self,jobs,jobs0events):
    """writes the nevents_unweighted file in the SubProcesses directory.
    We also need to write the jobs that will generate 0 events,
    because that makes sure that the cross section from those channels
    is taken into account in the event weights (by collect_events.f).
    """
    # one line per job: <P*/G*/events.lhe> <nevents> <ABS xsec> <wgt fraction>
    lines = []
    for job in jobs:
        pieces = job['dirname'].split('/')
        lhefile = pjoin(pjoin(pieces[-2], pieces[-1]), 'events.lhe')
        lines.append(' %s %d %9e %9e' %
                     (lhefile.ljust(40), job['nevents'],
                      job['resultABS']*job['wgt_frac'], job['wgt_frac']))
    # jobs without events still contribute their cross section (weight 1)
    for job in jobs0events:
        if job['nevents'] != 0:
            continue
        pieces = job['dirname'].split('/')
        lhefile = pjoin(pjoin(pieces[-2], pieces[-1]), 'events.lhe')
        lines.append(' %s %d %9e %9e' %
                     (lhefile.ljust(40), job['nevents'], job['resultABS'], 1.))
    with open(pjoin(self.me_dir,'SubProcesses',"nevents_unweighted"),'w') as out:
        out.write('\n'.join(lines)+'\n')
2222
def write_nevts_files(self,jobs):
    """write the nevts files in the SubProcesses/P*/G*/ directories"""
    # with 'bias' event normalisation the total ABS cross section is
    # written next to the number of events
    use_bias = self.run_card['event_norm'].lower() == 'bias'
    for job in jobs:
        with open(pjoin(job['dirname'],'nevts'),'w') as nevts_file:
            if use_bias:
                nevts_file.write('%i %f\n' % (job['nevents'],self.cross_sect_dict['xseca']))
            else:
                nevts_file.write('%i\n' % job['nevents'])
2231
def combine_split_order_run(self,jobs_to_run):
    """Combines jobs and grids from split jobs that have been run"""
    # Group the split jobs that belong together: those sharing the same
    # p_dir and channel. The job with split==1 anchors the group; higher
    # split numbers are picked up by the group selection itself.
    groups = []
    combined_jobs = []
    for job in jobs_to_run:
        if job['split'] == 0:
            # this job wasn't split: keep it as-is
            job['combined'] = 1
            combined_jobs.append(job)
        elif job['split'] == 1:
            groups.append([j for j in jobs_to_run
                           if j['p_dir'] == job['p_dir'] and
                              j['channel'] == job['channel']])
    for group in groups:
        # Combine the grids (mint-grids & MC-integer grids) first
        self.combine_split_order_grids(group)
        combined_jobs.append(self.combine_split_order_jobs(group))
    return combined_jobs
2253
def combine_split_order_jobs(self,job_group):
    """combine the jobs in job_group and return a single summed job"""
    # start from a shallow copy of the first split job ...
    sum_job = copy.copy(job_group[0])
    # ... and turn it back into a 'non-split' job
    sum_job['dirname'] = pjoin(sum_job['dirname'].rsplit('_',1)[0])
    sum_job['split'] = 0
    sum_job['wgt_mult'] = 1.0
    sum_job['combined'] = len(job_group)
    # plainly additive quantities:
    add_keys = ['niters_done','npoints_done','niters','npoints',
                'result','resultABS','time_spend']
    # quantities combined in quadrature:
    quad_keys = ['error','errorABS']
    # work with the squares while accumulating
    for key in quad_keys:
        sum_job[key] = math.pow(sum_job[key],2)
    for other in job_group[1:]:
        for key in add_keys:
            sum_job[key] += other[key]
        for key in quad_keys:
            sum_job[key] += math.pow(other[key],2)
    for key in quad_keys:
        sum_job[key] = math.sqrt(sum_job[key])
    sum_job['err_percABS'] = sum_job['errorABS']/sum_job['resultABS']*100.
    sum_job['err_perc'] = sum_job['error']/sum_job['result']*100.
    # iterations become the average over the combined jobs
    sum_job['niters'] = int(sum_job['niters_done']/len(job_group))
    sum_job['niters_done'] = int(sum_job['niters_done']/len(job_group))
    return sum_job
2284 2285
def combine_split_order_grids(self,job_group):
    """Combines the mint_grids and MC-integer grids from the split order
       jobs (fixed order only).
    """
    files_mint_grids=[]
    files_MC_integer=[]
    location=None
    for job in job_group:
        files_mint_grids.append(pjoin(job['dirname'],'mint_grids'))
        files_MC_integer.append(pjoin(job['dirname'],'grid.MC_integer'))
        # all split directories must share the same parent ("master") dir
        if not location:
            location=pjoin(job['dirname'].rsplit('_',1)[0])
        else:
            if location != pjoin(job['dirname'].rsplit('_',1)[0]) :
                raise aMCatNLOError('Not all jobs have the same location. '\
                                    +'Cannot combine them.')
    # Needed to average the grids (both xgrids, ave_virt and
    # MC_integer grids), but sum the cross section info. The
    # latter is only the only line that contains integers.
    for j,fs in enumerate([files_mint_grids,files_MC_integer]):
        # read all files of this kind; linesoffiles[k] is the line list
        # of the k-th split directory
        linesoffiles=[]
        for f in fs:
            with open(f,'r+') as fi:
                linesoffiles.append(fi.readlines())
        to_write=[]
        # process the files line-by-line in lockstep
        for rowgrp in zip(*linesoffiles):
            try:
                # check that last element on the line is an
                # integer (will raise ValueError if not the
                # case). If integer, this is the line that
                # contains information that needs to be
                # summed. All other lines can be averaged.
                is_integer = [[int(row.strip().split()[-1])] for row in rowgrp]
                floatsbyfile = [[float(a) for a in row.strip().split()] for row in rowgrp]
                floatgrps = zip(*floatsbyfile)
                # the "integer" line has (at most) 5 columns, combined
                # column-by-column as documented below
                special=[]
                for i,floatgrp in enumerate(floatgrps):
                    if i==0: # sum X-sec
                        special.append(sum(floatgrp))
                    elif i==1: # sum unc in quadrature
                        special.append(math.sqrt(sum([err**2 for err in floatgrp])))
                    elif i==2: # average number of PS per iteration
                        special.append(int(sum(floatgrp)/len(floatgrp)))
                    elif i==3: # sum the number of iterations
                        special.append(int(sum(floatgrp)))
                    elif i==4: # average the nhits_in_grids
                        special.append(int(sum(floatgrp)/len(floatgrp)))
                    else:
                        raise aMCatNLOError('"mint_grids" files not in correct format. '+\
                                            'Cannot combine them.')
                to_write.append(" ".join(str(s) for s in special) + "\n")
            except ValueError:
                # just average all
                floatsbyfile = [[float(a) for a in row.strip().split()] for row in rowgrp]
                floatgrps = zip(*floatsbyfile)
                averages = [sum(floatgrp)/len(floatgrp) for floatgrp in floatgrps]
                to_write.append(" ".join(str(a) for a in averages) + "\n")
        # write the data over the master location
        if j==0:
            with open(pjoin(location,'mint_grids'),'w') as f:
                f.writelines(to_write)
        elif j==1:
            with open(pjoin(location,'grid.MC_integer'),'w') as f:
                f.writelines(to_write)
2350 2351
def split_jobs_fixed_order(self,jobs_to_run,jobs_to_collect):
    """Looks in the jobs_to_run to see if there is the need to split the
       jobs, depending on the expected time they take. Updates
       jobs_to_run and jobs_to_collect to replace the split-job by
       its splits.
    """
    # determine the number jobs we should have (this is per p_dir)
    if self.options['run_mode'] ==2:
        nb_submit = int(self.options['nb_core'])
    elif self.options['run_mode'] ==1:
        nb_submit = int(self.options['cluster_size'])
    else:
        nb_submit =1
    # total expected aggregated running time, extrapolated from the time
    # spent and the ratio of requested to completed iterations*points
    time_expected=0
    for job in jobs_to_run:
        time_expected+=job['time_spend']*(job['niters']*job['npoints'])/ \
                       (job['niters_done']*job['npoints_done'])
    # this means that we must expect the following per job (in
    # ideal conditions)
    time_per_job=time_expected/(nb_submit*(1+len(jobs_to_run)/2))
    jobs_to_run_new=[]
    jobs_to_collect_new=copy.copy(jobs_to_collect)
    for job in jobs_to_run:
        # remove current job from jobs_to_collect. Make sure
        # to remove all the split ones in case the original
        # job had been a split one (before it was re-combined)
        # NOTE(review): relies on Python 2 'filter' returning a list so
        # that removing while looping is safe — confirm before porting
        for j in filter(lambda j: j['p_dir'] == job['p_dir'] and \
                        j['channel'] == job['channel'], jobs_to_collect_new):
            jobs_to_collect_new.remove(j)
        time_expected=job['time_spend']*(job['niters']*job['npoints'])/ \
                      (job['niters_done']*job['npoints_done'])
        # if the time expected for this job is (much) larger than
        # the time spend in the previous iteration, and larger
        # than the expected time per job, split it
        if time_expected > max(2*job['time_spend']/job['combined'],time_per_job):
            # determine the number of splits needed (at least 2, at most
            # the number of submission slots)
            nsplit=min(max(int(time_expected/max(2*job['time_spend']/job['combined'],time_per_job)),2),nb_submit)
            for i in range(1,nsplit+1):
                job_new=copy.copy(job)
                job_new['split']=i
                job_new['wgt_mult']=1./float(nsplit)
                job_new['dirname']=job['dirname']+'_%i' % job_new['split']
                # each split may run with a looser accuracy target
                job_new['accuracy']=min(job['accuracy']*math.sqrt(float(nsplit)),0.1)
                if nsplit >= job['niters']:
                    job_new['npoints']=int(job['npoints']*job['niters']/nsplit)
                    job_new['niters']=1
                else:
                    job_new['npoints']=int(job['npoints']/nsplit)
                jobs_to_collect_new.append(job_new)
                jobs_to_run_new.append(job_new)
        else:
            # no split needed: keep the job unchanged
            jobs_to_collect_new.append(job)
            jobs_to_run_new.append(job)
    return jobs_to_run_new,jobs_to_collect_new
2407 2408
def check_the_need_to_split(self,jobs_to_run,jobs_to_collect):
    """Looks in the jobs_to_run to see if there is the need to split the
       event generation step. Updates jobs_to_run and
       jobs_to_collect to replace the split-job by its
       splits. Also removes jobs that do not need any events.
    """
    max_evts = self.run_card['nevt_job']
    if max_evts <= 0:
        # no splitting requested: simply drop the jobs without events
        new_run = [job for job in jobs_to_collect if job['nevents'] != 0]
        return new_run, copy.copy(new_run)
    new_collect = copy.copy(jobs_to_collect)
    for job in jobs_to_run:
        nevents = job['nevents']
        if nevents == 0:
            new_collect.remove(job)
            continue
        if nevents <= max_evts:
            continue
        # too many events for a single job: replace it by nsplit sub-jobs
        new_collect.remove(job)
        nsplit = int(nevents/max_evts)
        if nevents % max_evts != 0:
            nsplit += 1
        remainder = nevents % nsplit
        for i in range(1, nsplit+1):
            sub_job = copy.copy(job)
            # distribute the events as evenly as possible over the sub-jobs
            sub_job['nevents'] = int(nevents/nsplit) + (1 if i <= remainder else 0)
            sub_job['wgt_frac'] = float(sub_job['nevents'])/float(nevents)
            sub_job['split'] = i
            sub_job['dirname'] = job['dirname'] + '_%i' % i
            new_collect.append(sub_job)
    return copy.copy(new_collect), new_collect
2449 2450
def update_jobs_to_run(self,req_acc,step,jobs,fixed_order=True):
    """
    For (N)LO+PS: determines the number of events and/or the required
    accuracy per job.
    For fixed order: determines which jobs need higher precision and
    returns those with the newly requested precision.
    """
    # totals from the latest write_res_txt_file call
    err=self.cross_sect_dict['errt']
    tot=self.cross_sect_dict['xsect']
    errABS=self.cross_sect_dict['erra']
    totABS=self.cross_sect_dict['xseca']
    jobs_new=[]
    if fixed_order:
        if req_acc == -1:
            # fixed #iterations/#points mode: one refinement step only
            if step+1 == 1:
                npoints = self.run_card['npoints_FO']
                niters = self.run_card['niters_FO']
                for job in jobs:
                    job['mint_mode']=-1
                    job['niters']=niters
                    job['npoints']=npoints
                    jobs_new.append(job)
            elif step+1 == 2:
                pass
            elif step+1 > 2:
                raise aMCatNLOError('Cannot determine number of iterations and PS points '+
                                    'for integration step %i' % step )
        elif ( req_acc > 0 and err/abs(tot) > req_acc*1.2 ) or step <= 0:
            req_accABS=req_acc*abs(tot)/totABS # overal relative required accuracy on ABS Xsec.
            for job in jobs:
                job['mint_mode']=-1
                # Determine relative required accuracy on the ABS for this job
                job['accuracy']=req_accABS*math.sqrt(totABS/job['resultABS'])
                # If already accurate enough, skip the job (except when doing the first
                # step for the iappl=2 run: we need to fill all the applgrid grids!)
                if (job['accuracy'] > job['errorABS']/job['resultABS'] and step != 0) \
                   and not (step==-1 and self.run_card['iappl'] == 2):
                    continue
                # Update the number of PS points based on errorABS, ncall and accuracy
                itmax_fl=job['niters_done']*math.pow(job['errorABS']/
                                                     (job['accuracy']*job['resultABS']),2)
                if itmax_fl <= 4.0 :
                    job['niters']=max(int(round(itmax_fl)),2)
                    job['npoints']=job['npoints_done']*2
                elif itmax_fl > 4.0 and itmax_fl <= 16.0 :
                    job['niters']=4
                    job['npoints']=int(round(job['npoints_done']*itmax_fl/4.0))*2
                else:
                    # cap the iteration estimate to keep jobs bounded
                    if itmax_fl > 100.0 : itmax_fl=50.0
                    job['niters']=int(round(math.sqrt(itmax_fl)))
                    job['npoints']=int(round(job['npoints_done']*itmax_fl/
                                             round(math.sqrt(itmax_fl))))*2
                # Add the job to the list of jobs that need to be run
                jobs_new.append(job)
        # empty when the requested accuracy has been reached
        return jobs_new
    elif step+1 <= 2:
        nevents=self.run_card['nevents']
        # Total required accuracy for the upper bounding envelope
        if req_acc<0:
            req_acc2_inv=nevents
        else:
            req_acc2_inv=1/(req_acc*req_acc)
        if step+1 == 1 or step+1 == 2 :
            # determine the req. accuracy for each of the jobs for Mint-step = 1
            for job in jobs:
                accuracy=min(math.sqrt(totABS/(req_acc2_inv*job['resultABS'])),0.2)
                job['accuracy']=accuracy
        if step+1 == 2:
            # Randomly (based on the relative ABS Xsec of the job) determine the
            # number of events each job needs to generate for MINT-step = 2.
            r=self.get_randinit_seed()
            random.seed(r)
            totevts=nevents
            for job in jobs:
                job['nevents'] = 0
            # assign the requested events one at a time, each to a job
            # chosen with probability proportional to its ABS cross section
            while totevts :
                target = random.random() * totABS
                crosssum = 0.
                i = 0
                while i<len(jobs) and crosssum < target:
                    job = jobs[i]
                    crosssum += job['resultABS']
                    i += 1
                totevts -= 1
                i -= 1
                jobs[i]['nevents'] += 1
            for job in jobs:
                job['mint_mode']=step+1 # next step
        return jobs
    else:
        # (N)LO+PS beyond the event generation step: nothing left to do
        return []
2542 2543
def get_randinit_seed(self):
    """ Get the random number seed from the randinit file """
    randinit_path = pjoin(self.me_dir, "SubProcesses", "randinit")
    # the file holds a single entry of the form "r=%d"; skip the "r=" prefix
    with open(randinit_path) as randinit:
        return int(randinit.read()[2:])
2550 2551
def append_the_results(self,jobs,integration_step):
    """Appends the results for each of the jobs in the job list"""
    failed_logs = []
    for job in jobs:
        try:
            # for integration_step < 0 (fixed order restarted with
            # 'only_generation') read the final 'res.dat' instead of the
            # per-step file
            if integration_step >= 0 :
                res_path = pjoin(job['dirname'],'res_%s.dat' % integration_step)
            else:
                res_path = pjoin(job['dirname'],'res.dat')
            with open(res_path) as res_file:
                results = res_file.readline().split()
        except IOError:
            # remember the failure but keep collecting the other jobs
            failed_logs.append(pjoin(job['dirname'],'log.txt'))
            continue
        # whitespace-separated columns of the res file:
        job['resultABS'] = float(results[0])
        job['errorABS'] = float(results[1])
        job['result'] = float(results[2])
        job['error'] = float(results[3])
        job['niters_done'] = int(results[4])
        job['npoints_done'] = int(results[5])
        job['time_spend'] = float(results[6])
        # derived relative errors in percent
        job['err_percABS'] = job['errorABS']/job['resultABS']*100.
        job['err_perc'] = job['error']/job['result']*100.
    if failed_logs:
        raise aMCatNLOError('An error occurred during the collection of results.\n' +
                            'Please check the .log files inside the directories which failed:\n' +
                            '\n'.join(failed_logs)+'\n')
2584 2585 2586
def write_res_txt_file(self,jobs,integration_step):
    """writes the res.txt files in the SubProcess dir"""
    # largest absolute error first, so the worst channels are listed on top
    jobs.sort(key=lambda job: -job['errorABS'])
    content = ['\n\nCross section per integration channel:']
    content.extend('%(p_dir)20s %(channel)15s %(result)10.8e %(error)6.4e %(err_perc)6.4f%% ' % job
                   for job in jobs)
    content.append('\n\nABS cross section per integration channel:')
    content.extend('%(p_dir)20s %(channel)15s %(resultABS)10.8e %(errorABS)6.4e %(err_percABS)6.4f%% ' % job
                   for job in jobs)
    # totals: cross sections weighted by the event-weight fraction,
    # uncertainties combined in quadrature (squares summed here)
    totABS = sum(job['resultABS']*job['wgt_frac'] for job in jobs)
    errABS = sum(math.pow(job['errorABS'],2)*job['wgt_frac'] for job in jobs)
    tot = sum(job['result']*job['wgt_frac'] for job in jobs)
    err = sum(math.pow(job['error'],2)*job['wgt_frac'] for job in jobs)
    if jobs:
        content.append('\nTotal ABS and \nTotal: \n %10.8e +- %6.4e (%6.4e%%)\n %10.8e +- %6.4e (%6.4e%%) \n' %
                       (totABS, math.sqrt(errABS), math.sqrt(errABS)/totABS *100.,
                        tot, math.sqrt(err), math.sqrt(err)/tot *100.))
    with open(pjoin(self.me_dir,'SubProcesses','res_%s.txt' % integration_step),'w') as res_file:
        res_file.write('\n'.join(content))
    return {'xsect':tot,'xseca':totABS,'errt':math.sqrt(err),
            'erra':math.sqrt(errABS),'randinit':self.get_randinit_seed()}
2615 2616
def collect_scale_pdf_info(self,options,jobs):
    """read the scale_pdf_dependence.dat files and collects there results"""
    card = self.run_card
    # only collect when scale/PDF reweighting was requested, or when
    # several dynamical scales / PDF sets are in use
    needs_collection = (any(card['reweight_scale']) or any(card['reweight_PDF'])
                        or len(card['dynamical_scale_choice']) > 1
                        or len(card['lhaid']) > 1)
    if not needs_collection:
        return []
    evt_files = [pjoin(job['dirname'],'scale_pdf_dependence.dat') for job in jobs]
    evt_wghts = [job['wgt_frac'] for job in jobs]
    return self.pdf_scale_from_reweighting(evt_files,evt_wghts)
2629 2630
2631 - def combine_plots_FO(self,folder_name,jobs):
2632 """combines the plots and puts then in the Events/run* directory""" 2633 devnull = open(os.devnull, 'w') 2634 2635 if self.analyse_card['fo_analysis_format'].lower() == 'topdrawer': 2636 topfiles = [] 2637 for job in jobs: 2638 if job['dirname'].endswith('.top'): 2639 topfiles.append(job['dirname']) 2640 else: 2641 topfiles.append(pjoin(job['dirname'],'MADatNLO.top')) 2642 misc.call(['./combine_plots_FO.sh'] + topfiles, \ 2643 stdout=devnull, 2644 cwd=pjoin(self.me_dir, 'SubProcesses')) 2645 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.top'), 2646 pjoin(self.me_dir, 'Events', self.run_name)) 2647 logger.info('The results of this run and the TopDrawer file with the plots' + \ 2648 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2649 elif self.analyse_card['fo_analysis_format'].lower() == 'hwu': 2650 out=pjoin(self.me_dir,'Events',self.run_name,'MADatNLO') 2651 self.combine_plots_HwU(jobs,out) 2652 try: 2653 misc.call(['gnuplot','MADatNLO.gnuplot'],\ 2654 stdout=devnull,stderr=devnull,\ 2655 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2656 except Exception: 2657 pass 2658 logger.info('The results of this run and the HwU and GnuPlot files with the plots' + \ 2659 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2660 elif self.analyse_card['fo_analysis_format'].lower() == 'root': 2661 rootfiles = [] 2662 for job in jobs: 2663 if job['dirname'].endswith('.root'): 2664 rootfiles.append(job['dirname']) 2665 else: 2666 rootfiles.append(pjoin(job['dirname'],'MADatNLO.root')) 2667 misc.call(['./combine_root.sh'] + folder_name + rootfiles, \ 2668 stdout=devnull, 2669 cwd=pjoin(self.me_dir, 'SubProcesses')) 2670 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.root'), 2671 pjoin(self.me_dir, 'Events', self.run_name)) 2672 logger.info('The results of this run and the ROOT file with the plots' + \ 2673 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2674 elif 
self.analyse_card['fo_analysis_format'].lower() == 'lhe': 2675 self.combine_FO_lhe(jobs) 2676 logger.info('The results of this run and the LHE File (to be used for plotting only)' + \ 2677 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2678 else: 2679 logger.info('The results of this run' + \ 2680 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
2681
2682 - def combine_FO_lhe(self,jobs):
2683 """combine the various lhe file generated in each directory. 2684 They are two steps: 2685 1) banner 2686 2) reweight each sample by the factor written at the end of each file 2687 3) concatenate each of the new files (gzip those). 2688 """ 2689 2690 logger.info('Combining lhe events for plotting analysis') 2691 start = time.time() 2692 self.run_card['fo_lhe_postprocessing'] = [i.lower() for i in self.run_card['fo_lhe_postprocessing']] 2693 output = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 2694 if os.path.exists(output): 2695 os.remove(output) 2696 2697 2698 2699 2700 # 1. write the banner 2701 text = open(pjoin(jobs[0]['dirname'],'header.txt'),'r').read() 2702 i1, i2 = text.find('<initrwgt>'),text.find('</initrwgt>') 2703 self.banner['initrwgt'] = text[10+i1:i2] 2704 # 2705 # <init> 2706 # 2212 2212 6.500000e+03 6.500000e+03 0 0 247000 247000 -4 1 2707 # 8.430000e+02 2.132160e+00 8.430000e+02 1 2708 # <generator name='MadGraph5_aMC@NLO' version='2.5.2'>please cite 1405.0301 </generator> 2709 # </init> 2710 2711 cross = sum(j['result'] for j in jobs) 2712 error = math.sqrt(sum(j['error'] for j in jobs)) 2713 self.banner['init'] = "0 0 0e0 0e0 0 0 0 0 -4 1\n %s %s %s 1" % (cross, error, cross) 2714 self.banner.write(output[:-3], close_tag=False) 2715 misc.gzip(output[:-3]) 2716 2717 2718 2719 fsock = lhe_parser.EventFile(output,'a') 2720 if 'nogrouping' in self.run_card['fo_lhe_postprocessing']: 2721 fsock.eventgroup = False 2722 else: 2723 fsock.eventgroup = True 2724 2725 if 'norandom' in self.run_card['fo_lhe_postprocessing']: 2726 for job in jobs: 2727 dirname = job['dirname'] 2728 #read last line 2729 lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline() 2730 nb_event, sumwgt, cross = [float(i) for i in lastline.split()] 2731 # get normalisation ratio 2732 ratio = cross/sumwgt 2733 lhe = lhe_parser.EventFile(pjoin(dirname,'events.lhe')) 2734 lhe.eventgroup = True # read the events by eventgroup 2735 for eventsgroup in lhe: 
2736 neweventsgroup = [] 2737 for i,event in enumerate(eventsgroup): 2738 event.rescale_weights(ratio) 2739 if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \ 2740 and event == neweventsgroup[-1]: 2741 neweventsgroup[-1].wgt += event.wgt 2742 for key in event.reweight_data: 2743 neweventsgroup[-1].reweight_data[key] += event.reweight_data[key] 2744 else: 2745 neweventsgroup.append(event) 2746 fsock.write_events(neweventsgroup) 2747 lhe.close() 2748 os.remove(pjoin(dirname,'events.lhe')) 2749 else: 2750 lhe = [] 2751 lenlhe = [] 2752 misc.sprint('need to combine %s event file' % len(jobs)) 2753 globallhe = lhe_parser.MultiEventFile() 2754 globallhe.eventgroup = True 2755 for job in jobs: 2756 dirname = job['dirname'] 2757 lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline() 2758 nb_event, sumwgt, cross = [float(i) for i in lastline.split()] 2759 lastlhe = globallhe.add(pjoin(dirname,'events.lhe'),cross, 0, cross, 2760 nb_event=int(nb_event), scale=cross/sumwgt) 2761 for eventsgroup in globallhe: 2762 neweventsgroup = [] 2763 for i,event in enumerate(eventsgroup): 2764 event.rescale_weights(event.sample_scale) 2765 if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \ 2766 and event == neweventsgroup[-1]: 2767 neweventsgroup[-1].wgt += event.wgt 2768 for key in event.reweight_data: 2769 neweventsgroup[-1].reweight_data[key] += event.reweight_data[key] 2770 else: 2771 neweventsgroup.append(event) 2772 fsock.write_events(neweventsgroup) 2773 globallhe.close() 2774 fsock.write('</LesHouchesEvents>\n') 2775 fsock.close() 2776 misc.sprint('combining lhe file done in ', time.time()-start) 2777 for job in jobs: 2778 dirname = job['dirname'] 2779 os.remove(pjoin(dirname,'events.lhe')) 2780 2781 2782 2783 misc.sprint('combining lhe file done in ', time.time()-start)
2784 2785 2786 2787 2788 2789
2790 - def combine_plots_HwU(self,jobs,out,normalisation=None):
2791 """Sums all the plots in the HwU format.""" 2792 logger.debug('Combining HwU plots.') 2793 2794 command = [sys.executable] 2795 command.append(pjoin(self.me_dir, 'bin', 'internal','histograms.py')) 2796 for job in jobs: 2797 if job['dirname'].endswith('.HwU'): 2798 command.append(job['dirname']) 2799 else: 2800 command.append(pjoin(job['dirname'],'MADatNLO.HwU')) 2801 command.append("--out="+out) 2802 command.append("--gnuplot") 2803 command.append("--band=[]") 2804 command.append("--lhapdf-config="+self.options['lhapdf']) 2805 if normalisation: 2806 command.append("--multiply="+(','.join([str(n) for n in normalisation]))) 2807 command.append("--sum") 2808 command.append("--keep_all_weights") 2809 command.append("--no_open") 2810 2811 p = misc.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd=self.me_dir) 2812 2813 while p.poll() is None: 2814 line = p.stdout.readline() 2815 if any(t in line for t in ['INFO:','WARNING:','CRITICAL:','ERROR:','KEEP:']): 2816 print line[:-1] 2817 elif __debug__ and line: 2818 logger.debug(line[:-1])
2819 2820
2821 - def applgrid_combine(self,cross,error,jobs):
2822 """Combines the APPLgrids in all the SubProcess/P*/all_G*/ directories""" 2823 logger.debug('Combining APPLgrids \n') 2824 applcomb=pjoin(self.options['applgrid'].rstrip('applgrid-config'), 2825 'applgrid-combine') 2826 all_jobs=[] 2827 for job in jobs: 2828 all_jobs.append(job['dirname']) 2829 ngrids=len(all_jobs) 2830 nobs =len([name for name in os.listdir(all_jobs[0]) if name.endswith("_out.root")]) 2831 for obs in range(0,nobs): 2832 gdir = [pjoin(job,"grid_obs_"+str(obs)+"_out.root") for job in all_jobs] 2833 # combine APPLgrids from different channels for observable 'obs' 2834 if self.run_card["iappl"] == 1: 2835 misc.call([applcomb,'-o', pjoin(self.me_dir,"Events",self.run_name, 2836 "aMCfast_obs_"+str(obs)+"_starting_grid.root"), '--optimise']+ gdir) 2837 elif self.run_card["iappl"] == 2: 2838 unc2_inv=pow(cross/error,2) 2839 unc2_inv_ngrids=pow(cross/error,2)*ngrids 2840 misc.call([applcomb,'-o', pjoin(self.me_dir,"Events", 2841 self.run_name,"aMCfast_obs_"+str(obs)+".root"),'-s', 2842 str(unc2_inv),'--weight',str(unc2_inv)]+ gdir) 2843 for job in all_jobs: 2844 os.remove(pjoin(job,"grid_obs_"+str(obs)+"_in.root")) 2845 else: 2846 raise aMCatNLOError('iappl parameter can only be 0, 1 or 2') 2847 # after combining, delete the original grids 2848 for ggdir in gdir: 2849 os.remove(ggdir)
2850 2851
2852 - def applgrid_distribute(self,options,mode,p_dirs):
2853 """Distributes the APPLgrids ready to be filled by a second run of the code""" 2854 # if no appl_start_grid argument given, guess it from the time stamps 2855 # of the starting grid files 2856 if not('appl_start_grid' in options.keys() and options['appl_start_grid']): 2857 gfiles = misc.glob(pjoin('*', 'aMCfast_obs_0_starting_grid.root'), 2858 pjoin(self.me_dir,'Events')) 2859 2860 time_stamps={} 2861 for root_file in gfiles: 2862 time_stamps[root_file]=os.path.getmtime(root_file) 2863 options['appl_start_grid']= \ 2864 max(time_stamps.iterkeys(), key=(lambda key: 2865 time_stamps[key])).split('/')[-2] 2866 logger.info('No --appl_start_grid option given. '+\ 2867 'Guessing that start grid from run "%s" should be used.' \ 2868 % options['appl_start_grid']) 2869 2870 if 'appl_start_grid' in options.keys() and options['appl_start_grid']: 2871 self.appl_start_grid = options['appl_start_grid'] 2872 start_grid_dir=pjoin(self.me_dir, 'Events', self.appl_start_grid) 2873 # check that this dir exists and at least one grid file is there 2874 if not os.path.exists(pjoin(start_grid_dir, 2875 'aMCfast_obs_0_starting_grid.root')): 2876 raise self.InvalidCmd('APPLgrid file not found: %s' % \ 2877 pjoin(start_grid_dir,'aMCfast_obs_0_starting_grid.root')) 2878 else: 2879 all_grids=[pjoin(start_grid_dir,name) for name in os.listdir( \ 2880 start_grid_dir) if name.endswith("_starting_grid.root")] 2881 nobs =len(all_grids) 2882 gstring=" ".join(all_grids) 2883 if not hasattr(self, 'appl_start_grid') or not self.appl_start_grid: 2884 raise self.InvalidCmd('No APPLgrid name currently defined.'+ 2885 'Please provide this information.') 2886 #copy the grid to all relevant directories 2887 for pdir in p_dirs: 2888 g_dirs = [file for file in os.listdir(pjoin(self.me_dir, 2889 "SubProcesses",pdir)) if file.startswith(mode+'_G') and 2890 os.path.isdir(pjoin(self.me_dir,"SubProcesses",pdir, file))] 2891 for g_dir in g_dirs: 2892 for grid in all_grids: 2893 obs=grid.split('_')[-3] 2894 
files.cp(grid,pjoin(self.me_dir,"SubProcesses",pdir,g_dir, 2895 'grid_obs_'+obs+'_in.root'))
2896 2897 2898 2899
2900 - def collect_log_files(self, jobs, integration_step):
2901 """collect the log files and put them in a single, html-friendly file 2902 inside the Events/run_.../ directory""" 2903 log_file = pjoin(self.me_dir, 'Events', self.run_name, 2904 'alllogs_%d.html' % integration_step) 2905 outfile = open(log_file, 'w') 2906 2907 content = '' 2908 content += '<HTML><BODY>\n<font face="courier" size=2>' 2909 for job in jobs: 2910 # put an anchor 2911 log=pjoin(job['dirname'],'log_MINT%s.txt' % integration_step) 2912 content += '<a name=%s></a>\n' % (os.path.dirname(log).replace( 2913 pjoin(self.me_dir,'SubProcesses'),'')) 2914 # and put some nice header 2915 content += '<font color="red">\n' 2916 content += '<br>LOG file for integration channel %s, %s <br>' % \ 2917 (os.path.dirname(log).replace(pjoin(self.me_dir, 2918 'SubProcesses'), ''), 2919 integration_step) 2920 content += '</font>\n' 2921 #then just flush the content of the small log inside the big log 2922 #the PRE tag prints everything verbatim 2923 with open(log) as l: 2924 content += '<PRE>\n' + l.read() + '\n</PRE>' 2925 content +='<br>\n' 2926 outfile.write(content) 2927 content='' 2928 2929 outfile.write('</font>\n</BODY></HTML>\n') 2930 outfile.close()
2931 2932
2933 - def finalise_run_FO(self,folder_name,jobs):
2934 """Combine the plots and put the res*.txt files in the Events/run.../ folder.""" 2935 # Copy the res_*.txt files to the Events/run* folder 2936 res_files = misc.glob('res_*.txt', pjoin(self.me_dir, 'SubProcesses')) 2937 for res_file in res_files: 2938 files.mv(res_file,pjoin(self.me_dir, 'Events', self.run_name)) 2939 # Collect the plots and put them in the Events/run* folder 2940 self.combine_plots_FO(folder_name,jobs) 2941 # If doing the applgrid-stuff, also combine those grids 2942 # and put those in the Events/run* folder 2943 if self.run_card['iappl'] != 0: 2944 cross=self.cross_sect_dict['xsect'] 2945 error=self.cross_sect_dict['errt'] 2946 self.applgrid_combine(cross,error,jobs)
2947 2948
2949 - def setup_cluster_or_multicore(self):
2950 """setup the number of cores for multicore, and the cluster-type for cluster runs""" 2951 if self.cluster_mode == 1: 2952 cluster_name = self.options['cluster_type'] 2953 try: 2954 self.cluster = cluster.from_name[cluster_name](**self.options) 2955 except KeyError: 2956 # Check if a plugin define this type of cluster 2957 # check for PLUGIN format 2958 cluster_class = misc.from_plugin_import(self.plugin_path, 2959 'new_cluster', cluster_name, 2960 info = 'cluster handling will be done with PLUGIN: %{plug}s' ) 2961 if cluster_class: 2962 self.cluster = cluster_class(**self.options) 2963 2964 if self.cluster_mode == 2: 2965 try: 2966 import multiprocessing 2967 if not self.nb_core: 2968 try: 2969 self.nb_core = int(self.options['nb_core']) 2970 except TypeError: 2971 self.nb_core = multiprocessing.cpu_count() 2972 logger.info('Using %d cores' % self.nb_core) 2973 except ImportError: 2974 self.nb_core = 1 2975 logger.warning('Impossible to detect the number of cores => Using One.\n'+ 2976 'Use set nb_core X in order to set this number and be able to'+ 2977 'run in multicore.') 2978 2979 self.cluster = cluster.MultiCore(**self.options)
2980 2981
    def clean_previous_results(self, options, p_dirs, folder_name):
        """Clean previous results.
        o.  If doing only the reweighting step, do not delete anything and return directlty.
        o.  Always remove all the G*_* files (from split event generation).
        o.  Remove the G* (or born_G* or all_G*) only when NOT doing only_generation or reweight_only."""
        # Reweight-only mode reuses everything from the previous run: nothing to clean.
        if options['reweightonly']:
            return
        if not options['only_generation']:
            self.update_status('Cleaning previous results', level=None)
        for dir in p_dirs:
            #find old folders to be removed
            for obj in folder_name:
                # obj is a glob-like pattern (e.g. 'G*'); obj[:-1] drops the
                # trailing '*' so startswith() can be used as the match.
                # list all the G* (or all_G* or born_G*) directories
                to_rm = [file for file in \
                             os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \
                             if file.startswith(obj[:-1]) and \
                            (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \
                             os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))]
                # list all the G*_* directories (from split event generation)
                # (an underscore but not '_G' distinguishes split-run leftovers)
                to_always_rm = [file for file in \
                             os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \
                             if file.startswith(obj[:-1]) and
                             '_' in file and not '_G' in file and \
                            (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \
                             os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))]

                # on a fresh run the base G* directories are removed as well
                if not options['only_generation']:
                    to_always_rm.extend(to_rm)
                if os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, 'MadLoop5_resources.tar.gz')):
                    to_always_rm.append(pjoin(self.me_dir, 'SubProcesses', dir, 'MadLoop5_resources.tar.gz'))
                files.rm([pjoin(self.me_dir, 'SubProcesses', dir, d) for d in to_always_rm])
        return
3014 3015
    def print_summary(self, options, step, mode, scale_pdf_info=[], done=True):
        """print a summary of the results contained in self.cross_sect_dict.
        step corresponds to the mintMC step, if =2 (i.e. after event generation)
        some additional infos are printed"""
        # find process name
        proc_card_lines = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read().split('\n')
        process = ''
        for line in proc_card_lines:
            if line.startswith('generate') or line.startswith('add process'):
                # collect every generated process, ' ; '-separated
                process = process+(line.replace('generate ', '')).replace('add process ','')+' ; '
        # human-readable labels for the run_card 'lpp' beam codes
        lpp = {0:'l', 1:'p', -1:'pbar', 2:'elastic photon from p', 3:'elastic photon from e'}
        if self.ninitial == 1:
            # decay process: no collider line; [:-3] strips the trailing ' ; '
            proc_info = '\n Process %s' % process[:-3]
        else:
            proc_info = '\n Process %s\n Run at %s-%s collider (%s + %s GeV)' % \
                (process[:-3], lpp[self.run_card['lpp1']], lpp[self.run_card['lpp2']],
                 self.run_card['ebeam1'], self.run_card['ebeam2'])

        # 1->N processes are widths (GeV); 2->N are cross sections (pb)
        if self.ninitial == 1:
            self.cross_sect_dict['unit']='GeV'
            self.cross_sect_dict['xsec_string']='(Partial) decay width'
            self.cross_sect_dict['axsec_string']='(Partial) abs(decay width)'
        else:
            self.cross_sect_dict['unit']='pb'
            self.cross_sect_dict['xsec_string']='Total cross section'
            self.cross_sect_dict['axsec_string']='Total abs(cross section)'
        if self.run_card['event_norm'].lower()=='bias':
            self.cross_sect_dict['xsec_string']+=', incl. bias (DO NOT USE)'

        # per-mode status labels for the three mint steps (0, 1, 2/final)
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            status = ['Determining the number of unweighted events per channel',
                      'Updating the number of unweighted events per channel',
                      'Summary:']
            computed='(computed from LHE events)'
        elif mode in ['NLO', 'LO']:
            status = ['Results after grid setup:','Current results:',
                      'Final results and run summary:']
            computed='(computed from histogram information)'

        if step != 2 and mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            # intermediate event-generation step: short report only
            message = status[step] + '\n\n Intermediate results:' + \
                ('\n Random seed: %(randinit)d' + \
                 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' + \
                 '\n %(axsec_string)s: %(xseca)8.3e +- %(erra)6.1e %(unit)s \n') \
                 % self.cross_sect_dict
        elif mode in ['NLO','LO'] and not done:
            # intermediate fixed-order step
            if step == 0:
                message = '\n ' + status[0] + \
                    '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
                    self.cross_sect_dict
            else:
                message = '\n ' + status[1] + \
                    '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
                    self.cross_sect_dict

        else:
            # final summary, including scale/PDF variations when available
            message = '\n --------------------------------------------------------------'
            message = message + \
                '\n ' + status[2] + proc_info
            if mode not in ['LO', 'NLO']:
                message = message + \
                    '\n Number of events generated: %s' % self.run_card['nevents']
            message = message + \
                '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
                self.cross_sect_dict
            message = message + \
                '\n --------------------------------------------------------------'
            # variations are statistically meaningless on tiny event samples
            if scale_pdf_info and (self.run_card['nevents']>=10000 or mode in ['NLO', 'LO']):
                if scale_pdf_info[0]:
                    # scale uncertainties
                    message = message + '\n Scale variation %s:' % computed
                    for s in scale_pdf_info[0]:
                        if s['unc']:
                            if self.run_card['ickkw'] != -1:
                                message = message + \
                                    ('\n Dynamical_scale_choice %(label)i (envelope of %(size)s values): '\
                                     '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % s
                            else:
                                # ickkw == -1: FxFx-like soft/hard scale split
                                message = message + \
                                    ('\n Soft and hard scale dependence (added in quadrature): '\
                                     '\n %(cen)8.3e pb +%(max_q)0.1f%% -%(min_q)0.1f%%') % s
                        else:
                            message = message + \
                                ('\n Dynamical_scale_choice %(label)i: '\
                                 '\n %(cen)8.3e pb') % s

                if scale_pdf_info[1]:
                    message = message + '\n PDF variation %s:' % computed
                    for p in scale_pdf_info[1]:
                        if p['unc']=='none':
                            message = message + \
                                ('\n %(name)s (central value only): '\
                                 '\n %(cen)8.3e pb') % p
                        elif p['unc']=='unknown':
                            message = message + \
                                ('\n %(name)s (%(size)s members; combination method unknown): '\
                                 '\n %(cen)8.3e pb') % p
                        else:
                            message = message + \
                                ('\n %(name)s (%(size)s members; using %(unc)s method): '\
                                 '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % p
                # pdf uncertainties
                message = message + \
                    '\n --------------------------------------------------------------'

        # intermediate steps only log the short message and stop here
        if (mode in ['NLO', 'LO'] and not done) or \
           (mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO'] and step!=2):
            logger.info(message+'\n')
            return

        # Some advanced general statistics are shown in the debug message at the
        # end of the run
        # Make sure it never stops a run
        # Gather some basic statistics for the run and extracted from the log files.
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            log_GV_files = misc.glob(pjoin('P*','G*','log_MINT*.txt'),
                                     pjoin(self.me_dir, 'SubProcesses'))
            all_log_files = log_GV_files
        elif mode == 'NLO':
            log_GV_files = misc.glob(pjoin('P*','all_G*','log_MINT*.txt'),
                                     pjoin(self.me_dir, 'SubProcesses'))
            all_log_files = log_GV_files

        elif mode == 'LO':
            # no virtuals at LO: only the born channel logs exist
            log_GV_files = ''
            all_log_files = misc.glob(pjoin('P*','born_G*','log_MINT*.txt'),
                                      pjoin(self.me_dir, 'SubProcesses'))
        else:
            raise aMCatNLOError, 'Running mode %s not supported.'%mode

        try:
            message, debug_msg = \
                self.compile_advanced_stats(log_GV_files, all_log_files, message)
        except Exception as e:
            # statistics collection must never kill the run; report the
            # backtrace in the debug output instead
            debug_msg = 'Advanced statistics collection failed with error "%s"\n'%str(e)
            err_string = StringIO.StringIO()
            traceback.print_exc(limit=4, file=err_string)
            debug_msg += 'Please report this backtrace to a MadGraph developer:\n%s'\
                %err_string.getvalue()

        logger.debug(debug_msg+'\n')
        logger.info(message+'\n')

        # Now copy relevant information in the Events/Run_<xxx> directory
        evt_path = pjoin(self.me_dir, 'Events', self.run_name)
        open(pjoin(evt_path, 'summary.txt'),'w').write(message+'\n')
        open(pjoin(evt_path, '.full_summary.txt'),
             'w').write(message+'\n\n'+debug_msg+'\n')

        self.archive_files(evt_path,mode)
3169
3170 - def archive_files(self, evt_path, mode):
3171 """ Copies in the Events/Run_<xxx> directory relevant files characterizing 3172 the run.""" 3173 3174 files_to_arxiv = [pjoin('Cards','param_card.dat'), 3175 pjoin('Cards','MadLoopParams.dat'), 3176 pjoin('Cards','FKS_params.dat'), 3177 pjoin('Cards','run_card.dat'), 3178 pjoin('Subprocesses','setscales.f'), 3179 pjoin('Subprocesses','cuts.f')] 3180 3181 if mode in ['NLO', 'LO']: 3182 files_to_arxiv.append(pjoin('Cards','FO_analyse_card.dat')) 3183 3184 if not os.path.exists(pjoin(evt_path,'RunMaterial')): 3185 os.mkdir(pjoin(evt_path,'RunMaterial')) 3186 3187 for path in files_to_arxiv: 3188 if os.path.isfile(pjoin(self.me_dir,path)): 3189 files.cp(pjoin(self.me_dir,path),pjoin(evt_path,'RunMaterial')) 3190 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'],cwd=evt_path) 3191 shutil.rmtree(pjoin(evt_path,'RunMaterial'))
3192
3193 - def compile_advanced_stats(self,log_GV_files,all_log_files,message):
3194 """ This functions goes through the log files given in arguments and 3195 compiles statistics about MadLoop stability, virtual integration 3196 optimization and detection of potential error messages into a nice 3197 debug message to printed at the end of the run """ 3198 3199 def safe_float(str_float): 3200 try: 3201 return float(str_float) 3202 except ValueError: 3203 logger.debug('Could not convert the following float during'+ 3204 ' advanced statistics printout: %s'%str(str_float)) 3205 return -1.0
3206 3207 3208 # > UPS is a dictionary of tuples with this format {channel:[nPS,nUPS]} 3209 # > Errors is a list of tuples with this format (log_file,nErrors) 3210 stats = {'UPS':{}, 'Errors':[], 'virt_stats':{}, 'timings':{}} 3211 mint_search = re.compile(r"MINT(?P<ID>\d*).txt") 3212 3213 # ================================== 3214 # == MadLoop stability statistics == 3215 # ================================== 3216 3217 # Recuperate the fraction of unstable PS points found in the runs for 3218 # the virtuals 3219 UPS_stat_finder = re.compile( 3220 r"Satistics from MadLoop:.*"+\ 3221 r"Total points tried\:\s+(?P<ntot>\d+).*"+\ 3222 r"Stability unknown\:\s+(?P<nsun>\d+).*"+\ 3223 r"Stable PS point\:\s+(?P<nsps>\d+).*"+\ 3224 r"Unstable PS point \(and rescued\)\:\s+(?P<nups>\d+).*"+\ 3225 r"Exceptional PS point \(unstable and not rescued\)\:\s+(?P<neps>\d+).*"+\ 3226 r"Double precision used\:\s+(?P<nddp>\d+).*"+\ 3227 r"Quadruple precision used\:\s+(?P<nqdp>\d+).*"+\ 3228 r"Initialization phase\-space points\:\s+(?P<nini>\d+).*"+\ 3229 r"Unknown return code \(100\)\:\s+(?P<n100>\d+).*"+\ 3230 r"Unknown return code \(10\)\:\s+(?P<n10>\d+).*",re.DOTALL) 3231 3232 unit_code_meaning = { 0 : 'Not identified (CTModeRun != -1)', 3233 1 : 'CutTools (double precision)', 3234 2 : 'PJFry++', 3235 3 : 'IREGI', 3236 4 : 'Golem95', 3237 5 : 'Samurai', 3238 6 : 'Ninja (double precision)', 3239 7 : 'COLLIER', 3240 8 : 'Ninja (quadruple precision)', 3241 9 : 'CutTools (quadruple precision)'} 3242 RetUnit_finder =re.compile( 3243 r"#Unit\s*(?P<unit>\d+)\s*=\s*(?P<n_occurences>\d+)") 3244 #Unit 3245 3246 for gv_log in log_GV_files: 3247 channel_name = '/'.join(gv_log.split('/')[-5:-1]) 3248 log=open(gv_log,'r').read() 3249 UPS_stats = re.search(UPS_stat_finder,log) 3250 for retunit_stats in re.finditer(RetUnit_finder, log): 3251 if channel_name not in stats['UPS'].keys(): 3252 stats['UPS'][channel_name] = [0]*10+[[0]*10] 3253 
stats['UPS'][channel_name][10][int(retunit_stats.group('unit'))] \ 3254 += int(retunit_stats.group('n_occurences')) 3255 if not UPS_stats is None: 3256 try: 3257 stats['UPS'][channel_name][0] += int(UPS_stats.group('ntot')) 3258 stats['UPS'][channel_name][1] += int(UPS_stats.group('nsun')) 3259 stats['UPS'][channel_name][2] += int(UPS_stats.group('nsps')) 3260 stats['UPS'][channel_name][3] += int(UPS_stats.group('nups')) 3261 stats['UPS'][channel_name][4] += int(UPS_stats.group('neps')) 3262 stats['UPS'][channel_name][5] += int(UPS_stats.group('nddp')) 3263 stats['UPS'][channel_name][6] += int(UPS_stats.group('nqdp')) 3264 stats['UPS'][channel_name][7] += int(UPS_stats.group('nini')) 3265 stats['UPS'][channel_name][8] += int(UPS_stats.group('n100')) 3266 stats['UPS'][channel_name][9] += int(UPS_stats.group('n10')) 3267 except KeyError: 3268 stats['UPS'][channel_name] = [int(UPS_stats.group('ntot')), 3269 int(UPS_stats.group('nsun')),int(UPS_stats.group('nsps')), 3270 int(UPS_stats.group('nups')),int(UPS_stats.group('neps')), 3271 int(UPS_stats.group('nddp')),int(UPS_stats.group('nqdp')), 3272 int(UPS_stats.group('nini')),int(UPS_stats.group('n100')), 3273 int(UPS_stats.group('n10')),[0]*10] 3274 debug_msg = "" 3275 if len(stats['UPS'].keys())>0: 3276 nTotPS = sum([chan[0] for chan in stats['UPS'].values()],0) 3277 nTotsun = sum([chan[1] for chan in stats['UPS'].values()],0) 3278 nTotsps = sum([chan[2] for chan in stats['UPS'].values()],0) 3279 nTotups = sum([chan[3] for chan in stats['UPS'].values()],0) 3280 nToteps = sum([chan[4] for chan in stats['UPS'].values()],0) 3281 nTotddp = sum([chan[5] for chan in stats['UPS'].values()],0) 3282 nTotqdp = sum([chan[6] for chan in stats['UPS'].values()],0) 3283 nTotini = sum([chan[7] for chan in stats['UPS'].values()],0) 3284 nTot100 = sum([chan[8] for chan in stats['UPS'].values()],0) 3285 nTot10 = sum([chan[9] for chan in stats['UPS'].values()],0) 3286 nTot1 = [sum([chan[10][i] for chan in stats['UPS'].values()],0) \ 3287 
for i in range(10)] 3288 UPSfracs = [(chan[0] , 0.0 if chan[1][0]==0 else \ 3289 safe_float(chan[1][4]*100)/chan[1][0]) for chan in stats['UPS'].items()] 3290 maxUPS = max(UPSfracs, key = lambda w: w[1]) 3291 3292 tmpStr = "" 3293 tmpStr += '\n Number of loop ME evaluations (by MadLoop): %d'%nTotPS 3294 tmpStr += '\n Stability unknown: %d'%nTotsun 3295 tmpStr += '\n Stable PS point: %d'%nTotsps 3296 tmpStr += '\n Unstable PS point (and rescued): %d'%nTotups 3297 tmpStr += '\n Unstable PS point (and not rescued): %d'%nToteps 3298 tmpStr += '\n Only double precision used: %d'%nTotddp 3299 tmpStr += '\n Quadruple precision used: %d'%nTotqdp 3300 tmpStr += '\n Initialization phase-space points: %d'%nTotini 3301 tmpStr += '\n Reduction methods used:' 3302 red_methods = [(unit_code_meaning[i],nTot1[i]) for i in \ 3303 unit_code_meaning.keys() if nTot1[i]>0] 3304 for method, n in sorted(red_methods, key= lambda l: l[1], reverse=True): 3305 tmpStr += '\n > %s%s%s'%(method,' '*(33-len(method)),n) 3306 if nTot100 != 0: 3307 debug_msg += '\n Unknown return code (100): %d'%nTot100 3308 if nTot10 != 0: 3309 debug_msg += '\n Unknown return code (10): %d'%nTot10 3310 nUnknownUnit = sum(nTot1[u] for u in range(10) if u \ 3311 not in unit_code_meaning.keys()) 3312 if nUnknownUnit != 0: 3313 debug_msg += '\n Unknown return code (1): %d'\ 3314 %nUnknownUnit 3315 3316 if maxUPS[1]>0.001: 3317 message += tmpStr 3318 message += '\n Total number of unstable PS point detected:'+\ 3319 ' %d (%4.2f%%)'%(nToteps,safe_float(100*nToteps)/nTotPS) 3320 message += '\n Maximum fraction of UPS points in '+\ 3321 'channel %s (%4.2f%%)'%maxUPS 3322 message += '\n Please report this to the authors while '+\ 3323 'providing the file' 3324 message += '\n %s'%str(pjoin(os.path.dirname(self.me_dir), 3325 maxUPS[0],'UPS.log')) 3326 else: 3327 debug_msg += tmpStr 3328 3329 3330 # ==================================================== 3331 # == aMC@NLO virtual integration optimization stats == 3332 # 
==================================================== 3333 3334 virt_tricks_finder = re.compile( 3335 r"accumulated results Virtual ratio\s*=\s*-?(?P<v_ratio>[\d\+-Eed\.]*)"+\ 3336 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_ratio_err>[\d\+-Eed\.]*)\s*\%\)\s*\n"+\ 3337 r"accumulated results ABS virtual\s*=\s*-?(?P<v_abs_contr>[\d\+-Eed\.]*)"+\ 3338 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_abs_contr_err>[\d\+-Eed\.]*)\s*\%\)") 3339 3340 virt_frac_finder = re.compile(r"update virtual fraction to\s*:\s*"+\ 3341 "-?(?P<v_frac>[\d\+-Eed\.]*)\s*-?(?P<v_average>[\d\+-Eed\.]*)") 3342 3343 channel_contr_finder = re.compile(r"Final result \[ABS\]\s*:\s*-?(?P<v_contr>[\d\+-Eed\.]*)") 3344 3345 channel_contr_list = {} 3346 for gv_log in log_GV_files: 3347 logfile=open(gv_log,'r') 3348 log = logfile.read() 3349 logfile.close() 3350 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 3351 vf_stats = None 3352 for vf_stats in re.finditer(virt_frac_finder, log): 3353 pass 3354 if not vf_stats is None: 3355 v_frac = safe_float(vf_stats.group('v_frac')) 3356 v_average = safe_float(vf_stats.group('v_average')) 3357 try: 3358 if v_frac < stats['virt_stats']['v_frac_min'][0]: 3359 stats['virt_stats']['v_frac_min']=(v_frac,channel_name) 3360 if v_frac > stats['virt_stats']['v_frac_max'][0]: 3361 stats['virt_stats']['v_frac_max']=(v_frac,channel_name) 3362 stats['virt_stats']['v_frac_avg'][0] += v_frac 3363 stats['virt_stats']['v_frac_avg'][1] += 1 3364 except KeyError: 3365 stats['virt_stats']['v_frac_min']=[v_frac,channel_name] 3366 stats['virt_stats']['v_frac_max']=[v_frac,channel_name] 3367 stats['virt_stats']['v_frac_avg']=[v_frac,1] 3368 3369 3370 ccontr_stats = None 3371 for ccontr_stats in re.finditer(channel_contr_finder, log): 3372 pass 3373 if not ccontr_stats is None: 3374 contrib = safe_float(ccontr_stats.group('v_contr')) 3375 try: 3376 if contrib>channel_contr_list[channel_name]: 3377 channel_contr_list[channel_name]=contrib 3378 except KeyError: 3379 
channel_contr_list[channel_name]=contrib 3380 3381 3382 # Now build the list of relevant virt log files to look for the maxima 3383 # of virt fractions and such. 3384 average_contrib = 0.0 3385 for value in channel_contr_list.values(): 3386 average_contrib += value 3387 if len(channel_contr_list.values()) !=0: 3388 average_contrib = average_contrib / len(channel_contr_list.values()) 3389 3390 relevant_log_GV_files = [] 3391 excluded_channels = set([]) 3392 all_channels = set([]) 3393 for log_file in log_GV_files: 3394 channel_name = '/'.join(log_file.split('/')[-3:-1]) 3395 all_channels.add(channel_name) 3396 try: 3397 if channel_contr_list[channel_name] > (0.1*average_contrib): 3398 relevant_log_GV_files.append(log_file) 3399 else: 3400 excluded_channels.add(channel_name) 3401 except KeyError: 3402 relevant_log_GV_files.append(log_file) 3403 3404 # Now we want to use the latest occurence of accumulated result in the log file 3405 for gv_log in relevant_log_GV_files: 3406 logfile=open(gv_log,'r') 3407 log = logfile.read() 3408 logfile.close() 3409 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 3410 3411 vt_stats = None 3412 for vt_stats in re.finditer(virt_tricks_finder, log): 3413 pass 3414 if not vt_stats is None: 3415 vt_stats_group = vt_stats.groupdict() 3416 v_ratio = safe_float(vt_stats.group('v_ratio')) 3417 v_ratio_err = safe_float(vt_stats.group('v_ratio_err')) 3418 v_contr = safe_float(vt_stats.group('v_abs_contr')) 3419 v_contr_err = safe_float(vt_stats.group('v_abs_contr_err')) 3420 try: 3421 if v_ratio < stats['virt_stats']['v_ratio_min'][0]: 3422 stats['virt_stats']['v_ratio_min']=(v_ratio,channel_name) 3423 if v_ratio > stats['virt_stats']['v_ratio_max'][0]: 3424 stats['virt_stats']['v_ratio_max']=(v_ratio,channel_name) 3425 if v_ratio < stats['virt_stats']['v_ratio_err_min'][0]: 3426 stats['virt_stats']['v_ratio_err_min']=(v_ratio_err,channel_name) 3427 if v_ratio > stats['virt_stats']['v_ratio_err_max'][0]: 3428 
stats['virt_stats']['v_ratio_err_max']=(v_ratio_err,channel_name) 3429 if v_contr < stats['virt_stats']['v_contr_min'][0]: 3430 stats['virt_stats']['v_contr_min']=(v_contr,channel_name) 3431 if v_contr > stats['virt_stats']['v_contr_max'][0]: 3432 stats['virt_stats']['v_contr_max']=(v_contr,channel_name) 3433 if v_contr_err < stats['virt_stats']['v_contr_err_min'][0]: 3434 stats['virt_stats']['v_contr_err_min']=(v_contr_err,channel_name) 3435 if v_contr_err > stats['virt_stats']['v_contr_err_max'][0]: 3436 stats['virt_stats']['v_contr_err_max']=(v_contr_err,channel_name) 3437 except KeyError: 3438 stats['virt_stats']['v_ratio_min']=[v_ratio,channel_name] 3439 stats['virt_stats']['v_ratio_max']=[v_ratio,channel_name] 3440 stats['virt_stats']['v_ratio_err_min']=[v_ratio_err,channel_name] 3441 stats['virt_stats']['v_ratio_err_max']=[v_ratio_err,channel_name] 3442 stats['virt_stats']['v_contr_min']=[v_contr,channel_name] 3443 stats['virt_stats']['v_contr_max']=[v_contr,channel_name] 3444 stats['virt_stats']['v_contr_err_min']=[v_contr_err,channel_name] 3445 stats['virt_stats']['v_contr_err_max']=[v_contr_err,channel_name] 3446 3447 vf_stats = None 3448 for vf_stats in re.finditer(virt_frac_finder, log): 3449 pass 3450 if not vf_stats is None: 3451 v_frac = safe_float(vf_stats.group('v_frac')) 3452 v_average = safe_float(vf_stats.group('v_average')) 3453 try: 3454 if v_average < stats['virt_stats']['v_average_min'][0]: 3455 stats['virt_stats']['v_average_min']=(v_average,channel_name) 3456 if v_average > stats['virt_stats']['v_average_max'][0]: 3457 stats['virt_stats']['v_average_max']=(v_average,channel_name) 3458 stats['virt_stats']['v_average_avg'][0] += v_average 3459 stats['virt_stats']['v_average_avg'][1] += 1 3460 except KeyError: 3461 stats['virt_stats']['v_average_min']=[v_average,channel_name] 3462 stats['virt_stats']['v_average_max']=[v_average,channel_name] 3463 stats['virt_stats']['v_average_avg']=[v_average,1] 3464 3465 try: 3466 debug_msg += '\n\n 
Statistics on virtual integration optimization : ' 3467 3468 debug_msg += '\n Maximum virt fraction computed %.3f (%s)'\ 3469 %tuple(stats['virt_stats']['v_frac_max']) 3470 debug_msg += '\n Minimum virt fraction computed %.3f (%s)'\ 3471 %tuple(stats['virt_stats']['v_frac_min']) 3472 debug_msg += '\n Average virt fraction computed %.3f'\ 3473 %safe_float(stats['virt_stats']['v_frac_avg'][0]/safe_float(stats['virt_stats']['v_frac_avg'][1])) 3474 debug_msg += '\n Stats below exclude negligible channels (%d excluded out of %d)'%\ 3475 (len(excluded_channels),len(all_channels)) 3476 debug_msg += '\n Maximum virt ratio used %.2f (%s)'\ 3477 %tuple(stats['virt_stats']['v_average_max']) 3478 debug_msg += '\n Maximum virt ratio found from grids %.2f (%s)'\ 3479 %tuple(stats['virt_stats']['v_ratio_max']) 3480 tmpStr = '\n Max. MC err. on virt ratio from grids %.1f %% (%s)'\ 3481 %tuple(stats['virt_stats']['v_ratio_err_max']) 3482 debug_msg += tmpStr 3483 # After all it was decided that it is better not to alarm the user unecessarily 3484 # with such printout of the statistics. 3485 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0 or \ 3486 # stats['virt_stats']['v_ratio_err_max'][0]>100.0: 3487 # message += "\n Suspiciously large MC error in :" 3488 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0: 3489 # message += tmpStr 3490 3491 tmpStr = '\n Maximum MC error on abs virt %.1f %% (%s)'\ 3492 %tuple(stats['virt_stats']['v_contr_err_max']) 3493 debug_msg += tmpStr 3494 # if stats['virt_stats']['v_contr_err_max'][0]>100.0: 3495 # message += tmpStr 3496 3497 3498 except KeyError: 3499 debug_msg += '\n Could not find statistics on the integration optimization. 
' 3500 3501 # ======================================= 3502 # == aMC@NLO timing profile statistics == 3503 # ======================================= 3504 3505 timing_stat_finder = re.compile(r"\s*Time spent in\s*(?P<name>\w*)\s*:\s*"+\ 3506 "(?P<time>[\d\+-Eed\.]*)\s*") 3507 3508 for logf in log_GV_files: 3509 logfile=open(logf,'r') 3510 log = logfile.read() 3511 logfile.close() 3512 channel_name = '/'.join(logf.split('/')[-3:-1]) 3513 mint = re.search(mint_search,logf) 3514 if not mint is None: 3515 channel_name = channel_name+' [step %s]'%mint.group('ID') 3516 3517 for time_stats in re.finditer(timing_stat_finder, log): 3518 try: 3519 stats['timings'][time_stats.group('name')][channel_name]+=\ 3520 safe_float(time_stats.group('time')) 3521 except KeyError: 3522 if time_stats.group('name') not in stats['timings'].keys(): 3523 stats['timings'][time_stats.group('name')] = {} 3524 stats['timings'][time_stats.group('name')][channel_name]=\ 3525 safe_float(time_stats.group('time')) 3526 3527 # useful inline function 3528 Tstr = lambda secs: str(datetime.timedelta(seconds=int(secs))) 3529 try: 3530 totTimeList = [(time, chan) for chan, time in \ 3531 stats['timings']['Total'].items()] 3532 except KeyError: 3533 totTimeList = [] 3534 3535 totTimeList.sort() 3536 if len(totTimeList)>0: 3537 debug_msg += '\n\n Inclusive timing profile :' 3538 debug_msg += '\n Overall slowest channel %s (%s)'%\ 3539 (Tstr(totTimeList[-1][0]),totTimeList[-1][1]) 3540 debug_msg += '\n Average channel running time %s'%\ 3541 Tstr(sum([el[0] for el in totTimeList])/len(totTimeList)) 3542 debug_msg += '\n Aggregated total running time %s'%\ 3543 Tstr(sum([el[0] for el in totTimeList])) 3544 else: 3545 debug_msg += '\n\n Inclusive timing profile non available.' 
3546 3547 sorted_keys = sorted(stats['timings'].keys(), key= lambda stat: \ 3548 sum(stats['timings'][stat].values()), reverse=True) 3549 for name in sorted_keys: 3550 if name=='Total': 3551 continue 3552 if sum(stats['timings'][name].values())<=0.0: 3553 debug_msg += '\n Zero time record for %s.'%name 3554 continue 3555 try: 3556 TimeList = [((100.0*time/stats['timings']['Total'][chan]), 3557 chan) for chan, time in stats['timings'][name].items()] 3558 except KeyError, ZeroDivisionError: 3559 debug_msg += '\n\n Timing profile for %s unavailable.'%name 3560 continue 3561 TimeList.sort() 3562 debug_msg += '\n Timing profile for <%s> :'%name 3563 try: 3564 debug_msg += '\n Overall fraction of time %.3f %%'%\ 3565 safe_float((100.0*(sum(stats['timings'][name].values())/ 3566 sum(stats['timings']['Total'].values())))) 3567 except KeyError, ZeroDivisionError: 3568 debug_msg += '\n Overall fraction of time unavailable.' 3569 debug_msg += '\n Largest fraction of time %.3f %% (%s)'%\ 3570 (TimeList[-1][0],TimeList[-1][1]) 3571 debug_msg += '\n Smallest fraction of time %.3f %% (%s)'%\ 3572 (TimeList[0][0],TimeList[0][1]) 3573 3574 # ============================= 3575 # == log file eror detection == 3576 # ============================= 3577 3578 # Find the number of potential errors found in all log files 3579 # This re is a simple match on a case-insensitve 'error' but there is 3580 # also some veto added for excluding the sentence 3581 # "See Section 6 of paper for error calculation." 3582 # which appear in the header of lhapdf in the logs. 
3583 err_finder = re.compile(\ 3584 r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)",re.IGNORECASE) 3585 for log in all_log_files: 3586 logfile=open(log,'r') 3587 nErrors = len(re.findall(err_finder, logfile.read())) 3588 logfile.close() 3589 if nErrors != 0: 3590 stats['Errors'].append((str(log),nErrors)) 3591 3592 nErrors = sum([err[1] for err in stats['Errors']],0) 3593 if nErrors != 0: 3594 debug_msg += '\n WARNING:: A total of %d error%s ha%s been '\ 3595 %(nErrors,'s' if nErrors>1 else '','ve' if nErrors>1 else 's')+\ 3596 'found in the following log file%s:'%('s' if \ 3597 len(stats['Errors'])>1 else '') 3598 for error in stats['Errors'][:3]: 3599 log_name = '/'.join(error[0].split('/')[-5:]) 3600 debug_msg += '\n > %d error%s in %s'%\ 3601 (error[1],'s' if error[1]>1 else '',log_name) 3602 if len(stats['Errors'])>3: 3603 nRemainingErrors = sum([err[1] for err in stats['Errors']][3:],0) 3604 nRemainingLogs = len(stats['Errors'])-3 3605 debug_msg += '\n And another %d error%s in %d other log file%s'%\ 3606 (nRemainingErrors, 's' if nRemainingErrors>1 else '', 3607 nRemainingLogs, 's ' if nRemainingLogs>1 else '') 3608 3609 return message, debug_msg 3610 3611
3612 - def reweight_and_collect_events(self, options, mode, nevents, event_norm):
3613 """this function calls the reweighting routines and creates the event file in the 3614 Event dir. Return the name of the event file created 3615 """ 3616 scale_pdf_info=[] 3617 if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \ 3618 len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1\ 3619 or self.run_card['store_rwgt_info']: 3620 scale_pdf_info = self.run_reweight(options['reweightonly']) 3621 self.update_status('Collecting events', level='parton', update_results=True) 3622 misc.compile(['collect_events'], 3623 cwd=pjoin(self.me_dir, 'SubProcesses'), nocompile=options['nocompile']) 3624 p = misc.Popen(['./collect_events'], cwd=pjoin(self.me_dir, 'SubProcesses'), 3625 stdin=subprocess.PIPE, 3626 stdout=open(pjoin(self.me_dir, 'collect_events.log'), 'w')) 3627 if event_norm.lower() == 'sum': 3628 p.communicate(input = '1\n') 3629 elif event_norm.lower() == 'unity': 3630 p.communicate(input = '3\n') 3631 elif event_norm.lower() == 'bias': 3632 p.communicate(input = '0\n') 3633 else: 3634 p.communicate(input = '2\n') 3635 3636 #get filename from collect events 3637 filename = open(pjoin(self.me_dir, 'collect_events.log')).read().split()[-1] 3638 3639 if not os.path.exists(pjoin(self.me_dir, 'SubProcesses', filename)): 3640 raise aMCatNLOError('An error occurred during event generation. ' + \ 3641 'The event file has not been created. 
Check collect_events.log') 3642 evt_file = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 3643 misc.gzip(pjoin(self.me_dir, 'SubProcesses', filename), stdout=evt_file) 3644 if not options['reweightonly']: 3645 self.print_summary(options, 2, mode, scale_pdf_info) 3646 res_files = misc.glob('res*.txt', pjoin(self.me_dir, 'SubProcesses')) 3647 for res_file in res_files: 3648 files.mv(res_file,pjoin(self.me_dir, 'Events', self.run_name)) 3649 3650 logger.info('The %s file has been generated.\n' % (evt_file)) 3651 self.results.add_detail('nb_event', nevents) 3652 self.update_status('Events generated', level='parton', update_results=True) 3653 return evt_file[:-3]
3654 3655
3656 - def run_mcatnlo(self, evt_file, options):
3657 """runs mcatnlo on the generated event file, to produce showered-events 3658 """ 3659 logger.info('Preparing MCatNLO run') 3660 try: 3661 misc.gunzip(evt_file) 3662 except Exception: 3663 pass 3664 3665 self.banner = banner_mod.Banner(evt_file) 3666 shower = self.banner.get_detail('run_card', 'parton_shower').upper() 3667 3668 #check that the number of split event files divides the number of 3669 # events, otherwise set it to 1 3670 if int(self.banner.get_detail('run_card', 'nevents') / \ 3671 self.shower_card['nsplit_jobs']) * self.shower_card['nsplit_jobs'] \ 3672 != self.banner.get_detail('run_card', 'nevents'): 3673 logger.warning(\ 3674 'nsplit_jobs in the shower card is not a divisor of the number of events.\n' + \ 3675 'Setting it to 1.') 3676 self.shower_card['nsplit_jobs'] = 1 3677 3678 # don't split jobs if the user asks to shower only a part of the events 3679 if self.shower_card['nevents'] > 0 and \ 3680 self.shower_card['nevents'] < self.banner.get_detail('run_card', 'nevents') and \ 3681 self.shower_card['nsplit_jobs'] != 1: 3682 logger.warning(\ 3683 'Only a part of the events will be showered.\n' + \ 3684 'Setting nsplit_jobs in the shower_card to 1.') 3685 self.shower_card['nsplit_jobs'] = 1 3686 3687 self.banner_to_mcatnlo(evt_file) 3688 3689 # if fastjet has to be linked (in extralibs) then 3690 # add lib /include dirs for fastjet if fastjet-config is present on the 3691 # system, otherwise add fjcore to the files to combine 3692 if 'fastjet' in self.shower_card['extralibs']: 3693 #first, check that stdc++ is also linked 3694 if not 'stdc++' in self.shower_card['extralibs']: 3695 logger.warning('Linking FastJet: adding stdc++ to EXTRALIBS') 3696 self.shower_card['extralibs'] += ' stdc++' 3697 # then check if options[fastjet] corresponds to a valid fj installation 3698 try: 3699 #this is for a complete fj installation 3700 p = subprocess.Popen([self.options['fastjet'], '--prefix'], \ 3701 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 3702 
output, error = p.communicate() 3703 #remove the line break from output (last character) 3704 output = output[:-1] 3705 # add lib/include paths 3706 if not pjoin(output, 'lib') in self.shower_card['extrapaths']: 3707 logger.warning('Linking FastJet: updating EXTRAPATHS') 3708 self.shower_card['extrapaths'] += ' ' + pjoin(output, 'lib') 3709 if not pjoin(output, 'include') in self.shower_card['includepaths']: 3710 logger.warning('Linking FastJet: updating INCLUDEPATHS') 3711 self.shower_card['includepaths'] += ' ' + pjoin(output, 'include') 3712 # to be changed in the fortran wrapper 3713 include_line = '#include "fastjet/ClusterSequence.hh"//INCLUDE_FJ' 3714 namespace_line = 'namespace fj = fastjet;//NAMESPACE_FJ' 3715 except Exception: 3716 logger.warning('Linking FastJet: using fjcore') 3717 # this is for FJcore, so no FJ library has to be linked 3718 self.shower_card['extralibs'] = self.shower_card['extralibs'].replace('fastjet', '') 3719 if not 'fjcore.o' in self.shower_card['analyse']: 3720 self.shower_card['analyse'] += ' fjcore.o' 3721 # to be changed in the fortran wrapper 3722 include_line = '#include "fjcore.hh"//INCLUDE_FJ' 3723 namespace_line = 'namespace fj = fjcore;//NAMESPACE_FJ' 3724 # change the fortran wrapper with the correct namespaces/include 3725 fjwrapper_lines = open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc')).read().split('\n') 3726 for line in fjwrapper_lines: 3727 if '//INCLUDE_FJ' in line: 3728 fjwrapper_lines[fjwrapper_lines.index(line)] = include_line 3729 if '//NAMESPACE_FJ' in line: 3730 fjwrapper_lines[fjwrapper_lines.index(line)] = namespace_line 3731 with open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc'), 'w') as fsock: 3732 fsock.write('\n'.join(fjwrapper_lines) + '\n') 3733 3734 extrapaths = self.shower_card['extrapaths'].split() 3735 3736 # check that the path needed by HW++ and PY8 are set if one uses these shower 3737 if shower in ['HERWIGPP', 'PYTHIA8']: 3738 path_dict = 
{'HERWIGPP': ['hepmc_path', 3739 'thepeg_path', 3740 'hwpp_path'], 3741 'PYTHIA8': ['pythia8_path']} 3742 3743 if not all([self.options[ppath] and os.path.exists(self.options[ppath]) for ppath in path_dict[shower]]): 3744 raise aMCatNLOError('Some paths are missing or invalid in the configuration file.\n' + \ 3745 ('Please make sure you have set these variables: %s' % ', '.join(path_dict[shower]))) 3746 3747 if shower == 'HERWIGPP': 3748 extrapaths.append(pjoin(self.options['hepmc_path'], 'lib')) 3749 self.shower_card['extrapaths'] += ' %s' % pjoin(self.options['hepmc_path'], 'lib') 3750 3751 # add the HEPMC path of the pythia8 installation 3752 if shower == 'PYTHIA8': 3753 hepmc = subprocess.Popen([pjoin(self.options['pythia8_path'], 'bin', 'pythia8-config'), '--hepmc2'], 3754 stdout = subprocess.PIPE).stdout.read().strip() 3755 #this gives all the flags, i.e. 3756 #-I/Path/to/HepMC/include -L/Path/to/HepMC/lib -lHepMC 3757 # we just need the path to the HepMC libraries 3758 extrapaths.append(hepmc.split()[1].replace('-L', '')) 3759 3760 if shower == 'PYTHIA8' and not os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 3761 extrapaths.append(pjoin(self.options['pythia8_path'], 'lib')) 3762 3763 # set the PATH for the dynamic libraries 3764 if sys.platform == 'darwin': 3765 ld_library_path = 'DYLD_LIBRARY_PATH' 3766 else: 3767 ld_library_path = 'LD_LIBRARY_PATH' 3768 if ld_library_path in os.environ.keys(): 3769 paths = os.environ[ld_library_path] 3770 else: 3771 paths = '' 3772 paths += ':' + ':'.join(extrapaths) 3773 os.putenv(ld_library_path, paths) 3774 3775 shower_card_path = pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat') 3776 self.shower_card.write_card(shower, shower_card_path) 3777 3778 # overwrite if shower_card_set.dat exists in MCatNLO 3779 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat')): 3780 files.mv(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat'), 3781 pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat')) 3782 
3783 mcatnlo_log = pjoin(self.me_dir, 'mcatnlo.log') 3784 self.update_status('Compiling MCatNLO for %s...' % shower, level='shower') 3785 3786 3787 # libdl may be needded for pythia 82xx 3788 #if shower == 'PYTHIA8' and not \ 3789 # os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')) and \ 3790 # 'dl' not in self.shower_card['extralibs'].split(): 3791 # # 'dl' has to be linked with the extralibs 3792 # self.shower_card['extralibs'] += ' dl' 3793 # logger.warning("'dl' was added to extralibs from the shower_card.dat.\n" + \ 3794 # "It is needed for the correct running of PY8.2xx.\n" + \ 3795 # "If this library cannot be found on your system, a crash will occur.") 3796 3797 misc.call(['./MCatNLO_MadFKS.inputs'], stdout=open(mcatnlo_log, 'w'), 3798 stderr=open(mcatnlo_log, 'w'), 3799 cwd=pjoin(self.me_dir, 'MCatNLO'), 3800 close_fds=True) 3801 3802 exe = 'MCATNLO_%s_EXE' % shower 3803 if not os.path.exists(pjoin(self.me_dir, 'MCatNLO', exe)) and \ 3804 not os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe')): 3805 print open(mcatnlo_log).read() 3806 raise aMCatNLOError('Compilation failed, check %s for details' % mcatnlo_log) 3807 logger.info(' ... 
done') 3808 3809 # create an empty dir where to run 3810 count = 1 3811 while os.path.isdir(pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 3812 (shower, count))): 3813 count += 1 3814 rundir = pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 3815 (shower, count)) 3816 os.mkdir(rundir) 3817 files.cp(shower_card_path, rundir) 3818 3819 #look for the event files (don't resplit if one asks for the 3820 # same number of event files as in the previous run) 3821 event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name)) 3822 if max(len(event_files), 1) != self.shower_card['nsplit_jobs']: 3823 logger.info('Cleaning old files and splitting the event file...') 3824 #clean the old files 3825 files.rm([f for f in event_files if 'events.lhe' not in f]) 3826 if self.shower_card['nsplit_jobs'] > 1: 3827 misc.compile(['split_events'], cwd = pjoin(self.me_dir, 'Utilities'), nocompile=options['nocompile']) 3828 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'split_events')], 3829 stdin=subprocess.PIPE, 3830 stdout=open(pjoin(self.me_dir, 'Events', self.run_name, 'split_events.log'), 'w'), 3831 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 3832 p.communicate(input = 'events.lhe\n%d\n' % self.shower_card['nsplit_jobs']) 3833 logger.info('Splitting done.') 3834 event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name)) 3835 3836 event_files.sort() 3837 3838 self.update_status('Showering events...', level='shower') 3839 logger.info('(Running in %s)' % rundir) 3840 if shower != 'PYTHIA8': 3841 files.mv(pjoin(self.me_dir, 'MCatNLO', exe), rundir) 3842 files.mv(pjoin(self.me_dir, 'MCatNLO', 'MCATNLO_%s_input' % shower), rundir) 3843 else: 3844 # special treatment for pythia8 3845 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.cmd'), rundir) 3846 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe'), rundir) 3847 if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): # this is PY8.1xxx 3848 
files.ln(pjoin(self.options['pythia8_path'], 'examples', 'config.sh'), rundir) 3849 files.ln(pjoin(self.options['pythia8_path'], 'xmldoc'), rundir) 3850 else: # this is PY8.2xxx 3851 files.ln(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'), rundir) 3852 #link the hwpp exe in the rundir 3853 if shower == 'HERWIGPP': 3854 try: 3855 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')): 3856 files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++'), rundir) 3857 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')): 3858 files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig'), rundir) 3859 except Exception: 3860 raise aMCatNLOError('The Herwig++ path set in the configuration file is not valid.') 3861 3862 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so')): 3863 files.cp(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so'), rundir) 3864 3865 files.ln(evt_file, rundir, 'events.lhe') 3866 for i, f in enumerate(event_files): 3867 files.ln(f, rundir,'events_%d.lhe' % (i + 1)) 3868 3869 if not self.shower_card['analyse']: 3870 # an hep/hepmc file as output 3871 out_id = 'HEP' 3872 else: 3873 # one or more .top file(s) as output 3874 if "HwU" in self.shower_card['analyse']: 3875 out_id = 'HWU' 3876 else: 3877 out_id = 'TOP' 3878 3879 # write the executable 3880 with open(pjoin(rundir, 'shower.sh'), 'w') as fsock: 3881 # set the PATH for the dynamic libraries 3882 if sys.platform == 'darwin': 3883 ld_library_path = 'DYLD_LIBRARY_PATH' 3884 else: 3885 ld_library_path = 'LD_LIBRARY_PATH' 3886 fsock.write(open(pjoin(self.me_dir, 'MCatNLO', 'shower_template.sh')).read() \ 3887 % {'ld_library_path': ld_library_path, 3888 'extralibs': ':'.join(extrapaths)}) 3889 subprocess.call(['chmod', '+x', pjoin(rundir, 'shower.sh')]) 3890 3891 if event_files: 3892 arg_list = [[shower, out_id, self.run_name, '%d' % (i + 1)] \ 3893 for i in range(len(event_files))] 3894 else: 3895 arg_list = [[shower, 
out_id, self.run_name]] 3896 3897 self.run_all({rundir: 'shower.sh'}, arg_list, 'shower') 3898 self.njobs = 1 3899 self.wait_for_complete('shower') 3900 3901 # now collect the results 3902 message = '' 3903 warning = '' 3904 to_gzip = [evt_file] 3905 if out_id == 'HEP': 3906 #copy the showered stdhep/hepmc file back in events 3907 if shower in ['PYTHIA8', 'HERWIGPP']: 3908 hep_format = 'HEPMC' 3909 ext = 'hepmc' 3910 else: 3911 hep_format = 'StdHEP' 3912 ext = 'hep' 3913 3914 hep_file = '%s_%s_0.%s.gz' % \ 3915 (pjoin(os.path.dirname(evt_file), 'events'), shower, ext) 3916 count = 0 3917 3918 # find the first available name for the output: 3919 # check existing results with or without event splitting 3920 while os.path.exists(hep_file) or \ 3921 os.path.exists(hep_file.replace('.%s.gz' % ext, '__1.%s.gz' % ext)) : 3922 count +=1 3923 hep_file = '%s_%s_%d.%s.gz' % \ 3924 (pjoin(os.path.dirname(evt_file), 'events'), shower, count, ext) 3925 3926 try: 3927 if self.shower_card['nsplit_jobs'] == 1: 3928 files.mv(os.path.join(rundir, 'events.%s.gz' % ext), hep_file) 3929 message = ('The file %s has been generated. 
\nIt contains showered' + \ 3930 ' and hadronized events in the %s format obtained' + \ 3931 ' showering the parton-level event file %s.gz with %s') % \ 3932 (hep_file, hep_format, evt_file, shower) 3933 else: 3934 hep_list = [] 3935 for i in range(self.shower_card['nsplit_jobs']): 3936 hep_list.append(hep_file.replace('.%s.gz' % ext, '__%d.%s.gz' % (i + 1, ext))) 3937 files.mv(os.path.join(rundir, 'events_%d.%s.gz' % (i + 1, ext)), hep_list[-1]) 3938 message = ('The following files have been generated:\n %s\nThey contain showered' + \ 3939 ' and hadronized events in the %s format obtained' + \ 3940 ' showering the (split) parton-level event file %s.gz with %s') % \ 3941 ('\n '.join(hep_list), hep_format, evt_file, shower) 3942 3943 except OSError, IOError: 3944 raise aMCatNLOError('No file has been generated, an error occurred.'+\ 3945 ' More information in %s' % pjoin(os.getcwd(), 'amcatnlo_run.log')) 3946 3947 # run the plot creation in a secure way 3948 if hep_format == 'StdHEP': 3949 try: 3950 self.do_plot('%s -f' % self.run_name) 3951 except Exception, error: 3952 logger.info("Fail to make the plot. 
Continue...") 3953 pass 3954 3955 elif out_id == 'TOP' or out_id == 'HWU': 3956 #copy the topdrawer or HwU file(s) back in events 3957 if out_id=='TOP': 3958 ext='top' 3959 elif out_id=='HWU': 3960 ext='HwU' 3961 topfiles = [] 3962 top_tars = [tarfile.TarFile(f) for f in misc.glob('histfile*.tar', rundir)] 3963 for top_tar in top_tars: 3964 topfiles.extend(top_tar.getnames()) 3965 3966 # safety check 3967 if len(top_tars) != self.shower_card['nsplit_jobs']: 3968 raise aMCatNLOError('%d job(s) expected, %d file(s) found' % \ 3969 (self.shower_card['nsplit_jobs'], len(top_tars))) 3970 3971 # find the first available name for the output: 3972 # check existing results with or without event splitting 3973 filename = 'plot_%s_%d_' % (shower, 1) 3974 count = 1 3975 while os.path.exists(pjoin(self.me_dir, 'Events', 3976 self.run_name, '%s0.%s' % (filename,ext))) or \ 3977 os.path.exists(pjoin(self.me_dir, 'Events', 3978 self.run_name, '%s0__1.%s' % (filename,ext))): 3979 count += 1 3980 filename = 'plot_%s_%d_' % (shower, count) 3981 3982 if out_id=='TOP': 3983 hist_format='TopDrawer format' 3984 elif out_id=='HWU': 3985 hist_format='HwU and GnuPlot formats' 3986 3987 if not topfiles: 3988 # if no topfiles are found just warn the user 3989 warning = 'No .top file has been generated. 
For the results of your ' +\ 3990 'run, please check inside %s' % rundir 3991 elif self.shower_card['nsplit_jobs'] == 1: 3992 # only one job for the shower 3993 top_tars[0].extractall(path = rundir) 3994 plotfiles = [] 3995 for i, file in enumerate(topfiles): 3996 if out_id=='TOP': 3997 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 3998 '%s%d.top' % (filename, i)) 3999 files.mv(pjoin(rundir, file), plotfile) 4000 elif out_id=='HWU': 4001 out=pjoin(self.me_dir,'Events', 4002 self.run_name,'%s%d'% (filename,i)) 4003 histos=[{'dirname':pjoin(rundir,file)}] 4004 self.combine_plots_HwU(histos,out) 4005 try: 4006 misc.call(['gnuplot','%s%d.gnuplot' % (filename,i)],\ 4007 stdout=os.open(os.devnull, os.O_RDWR),\ 4008 stderr=os.open(os.devnull, os.O_RDWR),\ 4009 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 4010 except Exception: 4011 pass 4012 plotfile=pjoin(self.me_dir,'Events',self.run_name, 4013 '%s%d.HwU'% (filename,i)) 4014 plotfiles.append(plotfile) 4015 4016 ffiles = 'files' 4017 have = 'have' 4018 if len(plotfiles) == 1: 4019 ffiles = 'file' 4020 have = 'has' 4021 4022 message = ('The %s %s %s been generated, with histograms in the' + \ 4023 ' %s, obtained by showering the parton-level' + \ 4024 ' file %s.gz with %s.') % (ffiles, ', '.join(plotfiles), have, \ 4025 hist_format, evt_file, shower) 4026 else: 4027 # many jobs for the shower have been run 4028 topfiles_set = set(topfiles) 4029 plotfiles = [] 4030 for j, top_tar in enumerate(top_tars): 4031 top_tar.extractall(path = rundir) 4032 for i, file in enumerate(topfiles_set): 4033 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 4034 '%s%d__%d.%s' % (filename, i, j + 1,ext)) 4035 files.mv(pjoin(rundir, file), plotfile) 4036 plotfiles.append(plotfile) 4037 4038 # check if the user asked to combine the .top into a single file 4039 if self.shower_card['combine_td']: 4040 misc.compile(['sum_plots'], cwd = pjoin(self.me_dir, 'Utilities')) 4041 4042 if self.banner.get('run_card', 
'event_norm').lower() == 'sum': 4043 norm = 1. 4044 else: 4045 norm = 1./float(self.shower_card['nsplit_jobs']) 4046 4047 plotfiles2 = [] 4048 for i, file in enumerate(topfiles_set): 4049 filelist = ['%s%d__%d.%s' % (filename, i, j + 1,ext) \ 4050 for j in range(self.shower_card['nsplit_jobs'])] 4051 if out_id=='TOP': 4052 infile="%d\n%s\n%s\n" % \ 4053 (self.shower_card['nsplit_jobs'], 4054 '\n'.join(filelist), 4055 '\n'.join([str(norm)] * self.shower_card['nsplit_jobs'])) 4056 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'sum_plots')], 4057 stdin=subprocess.PIPE, 4058 stdout=os.open(os.devnull, os.O_RDWR), 4059 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 4060 p.communicate(input = infile) 4061 files.mv(pjoin(self.me_dir, 'Events', self.run_name, 'sum.top'), 4062 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.top' % (filename, i))) 4063 elif out_id=='HWU': 4064 out=pjoin(self.me_dir,'Events', 4065 self.run_name,'%s%d'% (filename,i)) 4066 histos=[] 4067 norms=[] 4068 for plotfile in plotfiles: 4069 histos.append({'dirname':plotfile}) 4070 norms.append(norm) 4071 self.combine_plots_HwU(histos,out,normalisation=norms) 4072 try: 4073 misc.call(['gnuplot','%s%d.gnuplot' % (filename, i)],\ 4074 stdout=os.open(os.devnull, os.O_RDWR),\ 4075 stderr=os.open(os.devnull, os.O_RDWR),\ 4076 cwd=pjoin(self.me_dir, 'Events',self.run_name)) 4077 except Exception: 4078 pass 4079 4080 plotfiles2.append(pjoin(self.me_dir, 'Events', self.run_name, '%s%d.%s' % (filename, i,ext))) 4081 tar = tarfile.open( 4082 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.tar.gz' % (filename, i)), 'w:gz') 4083 for f in filelist: 4084 tar.add(pjoin(self.me_dir, 'Events', self.run_name, f), arcname=f) 4085 files.rm([pjoin(self.me_dir, 'Events', self.run_name, f) for f in filelist]) 4086 4087 tar.close() 4088 4089 ffiles = 'files' 4090 have = 'have' 4091 if len(plotfiles2) == 1: 4092 ffiles = 'file' 4093 have = 'has' 4094 4095 message = ('The %s %s %s been generated, with histograms in the' 
+ \ 4096 ' %s, obtained by showering the parton-level' + \ 4097 ' file %s.gz with %s.\n' + \ 4098 'The files from the different shower ' + \ 4099 'jobs (before combining them) can be found inside %s.') % \ 4100 (ffiles, ', '.join(plotfiles2), have, hist_format,\ 4101 evt_file, shower, 4102 ', '.join([f.replace('%s' % ext, 'tar.gz') for f in plotfiles2])) 4103 4104 else: 4105 message = ('The following files have been generated:\n %s\n' + \ 4106 'They contain histograms in the' + \ 4107 ' %s, obtained by showering the parton-level' + \ 4108 ' file %s.gz with %s.') % ('\n '.join(plotfiles), \ 4109 hist_format, evt_file, shower) 4110 4111 # Now arxiv the shower card used if RunMaterial is present 4112 run_dir_path = pjoin(rundir, self.run_name) 4113 if os.path.exists(pjoin(run_dir_path,'RunMaterial.tar.gz')): 4114 misc.call(['tar','-xzpf','RunMaterial.tar.gz'],cwd=run_dir_path) 4115 files.cp(pjoin(self.me_dir,'Cards','shower_card.dat'), 4116 pjoin(run_dir_path,'RunMaterial','shower_card_for_%s_%d.dat'\ 4117 %(shower, count))) 4118 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'], 4119 cwd=run_dir_path) 4120 shutil.rmtree(pjoin(run_dir_path,'RunMaterial')) 4121 # end of the run, gzip files and print out the message/warning 4122 for f in to_gzip: 4123 misc.gzip(f) 4124 if message: 4125 logger.info(message) 4126 if warning: 4127 logger.warning(warning) 4128 4129 self.update_status('Run complete', level='shower', update_results=True)
4130 4131 ############################################################################
    def set_run_name(self, name, tag=None, level='parton', reload_card=False,**opts):
        """Define the run name, the run_tag, the banner and the results.

        name: run name to switch to.
        tag: if given, force this run_tag; otherwise a fresh tag is picked
            whenever the last tag already holds data for this level.
        level: the stage about to run ('parton', 'shower', 'delphes',
            'madanalysis5_hadron' or 'plot').
        reload_card: when True, re-read Cards/run_card.dat even if the run
            name is unchanged.
        Returns, for levels beyond 'parton', the tag of a previous run that
        holds the data required by this level (None when not applicable).
        """

        # For each level about to run, the list of result attributes that,
        # when already filled in the last tag, force a switch to a new tag.
        upgrade_tag = {'parton': ['parton','delphes','shower','madanalysis5_hadron'],
                       'shower': ['shower','delphes','madanalysis5_hadron'],
                       'delphes':['delphes'],
                       'madanalysis5_hadron':['madanalysis5_hadron'],
                       'plot':[]}

        if name == self.run_name:
            if reload_card:
                run_card = pjoin(self.me_dir, 'Cards','run_card.dat')
                self.run_card = banner_mod.RunCardNLO(run_card)

            # check if we need to change the tag
            if tag:
                self.run_card['run_tag'] = tag
                self.run_tag = tag
                self.results.add_run(self.run_name, self.run_card)
            else:
                # pick a fresh tag as soon as one of the conflicting
                # attributes is already present in the last tag
                for tag in upgrade_tag[level]:
                    if getattr(self.results[self.run_name][-1], tag):
                        tag = self.get_available_tag()
                        self.run_card['run_tag'] = tag
                        self.run_tag = tag
                        self.results.add_run(self.run_name, self.run_card)
                        break
            return # Nothing to do anymore

        # save/clean previous run
        if self.run_name:
            self.store_result()
        # store new name
        self.run_name = name

        # Read run_card
        run_card = pjoin(self.me_dir, 'Cards','run_card.dat')
        self.run_card = banner_mod.RunCardNLO(run_card)

        new_tag = False
        # First call for this run -> set the banner
        self.banner = banner_mod.recover_banner(self.results, level, self.run_name, tag)
        if 'mgruncard' in self.banner:
            # the recovered banner carries its own run_card: prefer it
            self.run_card = self.banner.charge_card('run_card')
        if tag:
            self.run_card['run_tag'] = tag
            new_tag = True
        elif not self.run_name in self.results and level =='parton':
            pass # No results yet, so current tag is fine
        elif not self.run_name in self.results:
            # This is only for case when you want to trick the interface
            logger.warning('Trying to run data on unknown run.')
            self.results.add_run(name, self.run_card)
            self.results.update('add run %s' % name, 'all', makehtml=True)
        else:
            for tag in upgrade_tag[level]:

                if getattr(self.results[self.run_name][-1], tag):
                    # LEVEL is already define in the last tag -> need to switch tag
                    tag = self.get_available_tag()
                    self.run_card['run_tag'] = tag
                    new_tag = True
                    break
            if not new_tag:
                # We can add the results to the current run
                tag = self.results[self.run_name][-1]['tag']
                self.run_card['run_tag'] = tag # ensure that run_tag is correct


        if name in self.results and not new_tag:
            self.results.def_current(self.run_name)
        else:
            self.results.add_run(self.run_name, self.run_card)

        self.run_tag = self.run_card['run_tag']

        # Return the tag of the previous run having the required data for this
        # tag/run to working wel.
        if level == 'parton':
            return
        elif level == 'pythia':
            return self.results[self.run_name][0]['tag']
        else:
            # walk the tags backwards and return the most recent one with
            # pythia data; falls through (returns None) when none exists
            for i in range(-1,-len(self.results[self.run_name])-1,-1):
                tagRun = self.results[self.run_name][i]
                if tagRun.pythia:
                    return tagRun['tag']
4220 4221
4222 - def store_result(self):
4223 """ tar the pythia results. This is done when we are quite sure that 4224 the pythia output will not be use anymore """ 4225 4226 if not self.run_name: 4227 return 4228 4229 self.results.save() 4230 4231 if not self.to_store: 4232 return 4233 4234 if 'event' in self.to_store: 4235 if os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')): 4236 if not os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')): 4237 self.update_status('gzipping output file: events.lhe', level='parton', error=True) 4238 misc.gzip(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')) 4239 else: 4240 os.remove(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')) 4241 if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): 4242 os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) 4243 4244 4245 tag = self.run_card['run_tag'] 4246 4247 self.to_store = []
4248 4249 4250 ############################################################################
4251 - def get_Gdir(self, Pdir=None):
4252 """get the list of Gdirectory if not yet saved.""" 4253 4254 if hasattr(self, "Gdirs"): 4255 if self.me_dir in self.Gdirs: 4256 if Pdir is None: 4257 return sum(self.Gdirs.values()) 4258 else: 4259 return self.Gdirs[Pdir] 4260 4261 Pdirs = self.get_Pdir() 4262 Gdirs = {self.me_dir:[]} 4263 for P in Pdirs: 4264 Gdirs[P] = [pjoin(P,G) for G in os.listdir(P) if G.startswith('G') and 4265 os.path.isdir(pjoin(P,G))] 4266 4267 self.Gdirs = Gdirs 4268 return self.getGdir(Pdir)
4269 4270
4271 - def get_init_dict(self, evt_file):
4272 """reads the info in the init block and returns them in a dictionary""" 4273 ev_file = open(evt_file) 4274 init = "" 4275 found = False 4276 while True: 4277 line = ev_file.readline() 4278 if "<init>" in line: 4279 found = True 4280 elif found and not line.startswith('#'): 4281 init += line 4282 if "</init>" in line or "<event>" in line: 4283 break 4284 ev_file.close() 4285 4286 # IDBMUP(1),IDBMUP(2),EBMUP(1),EBMUP(2), PDFGUP(1),PDFGUP(2), 4287 # PDFSUP(1),PDFSUP(2),IDWTUP,NPRUP 4288 # these are not included (so far) in the init_dict 4289 # XSECUP(1),XERRUP(1),XMAXUP(1),LPRUP(1) 4290 4291 init_dict = {} 4292 init_dict['idbmup1'] = int(init.split()[0]) 4293 init_dict['idbmup2'] = int(init.split()[1]) 4294 init_dict['ebmup1'] = float(init.split()[2]) 4295 init_dict['ebmup2'] = float(init.split()[3]) 4296 init_dict['pdfgup1'] = int(init.split()[4]) 4297 init_dict['pdfgup2'] = int(init.split()[5]) 4298 init_dict['pdfsup1'] = int(init.split()[6]) 4299 init_dict['pdfsup2'] = int(init.split()[7]) 4300 init_dict['idwtup'] = int(init.split()[8]) 4301 init_dict['nprup'] = int(init.split()[9]) 4302 4303 return init_dict
4304 4305
4306 - def banner_to_mcatnlo(self, evt_file):
4307 """creates the mcatnlo input script using the values set in the header of the event_file. 4308 It also checks if the lhapdf library is used""" 4309 4310 shower = self.banner.get('run_card', 'parton_shower').upper() 4311 pdlabel = self.banner.get('run_card', 'pdlabel') 4312 itry = 0 4313 nevents = self.shower_card['nevents'] 4314 init_dict = self.get_init_dict(evt_file) 4315 4316 if nevents < 0 or \ 4317 nevents > self.banner.get_detail('run_card', 'nevents'): 4318 nevents = self.banner.get_detail('run_card', 'nevents') 4319 4320 nevents = nevents / self.shower_card['nsplit_jobs'] 4321 4322 mcmass_dict = {} 4323 for line in [l for l in self.banner['montecarlomasses'].split('\n') if l]: 4324 pdg = int(line.split()[0]) 4325 mass = float(line.split()[1]) 4326 mcmass_dict[pdg] = mass 4327 4328 content = 'EVPREFIX=%s\n' % pjoin(os.path.split(evt_file)[1]) 4329 content += 'NEVENTS=%d\n' % nevents 4330 content += 'NEVENTS_TOT=%d\n' % (self.banner.get_detail('run_card', 'nevents') /\ 4331 self.shower_card['nsplit_jobs']) 4332 content += 'MCMODE=%s\n' % shower 4333 content += 'PDLABEL=%s\n' % pdlabel 4334 4335 try: 4336 aewm1 = self.banner.get_detail('param_card', 'sminputs', 1).value 4337 raise KeyError 4338 except KeyError: 4339 mod = self.get_model() 4340 if not hasattr(mod, 'parameter_dict'): 4341 from models import model_reader 4342 mod = model_reader.ModelReader(mod) 4343 mod.set_parameters_and_couplings(self.banner.param_card) 4344 aewm1 = 0 4345 for key in ['aEWM1', 'AEWM1', 'aEWm1', 'aewm1']: 4346 if key in mod['parameter_dict']: 4347 aewm1 = mod['parameter_dict'][key] 4348 break 4349 elif 'mdl_%s' % key in mod['parameter_dict']: 4350 aewm1 = mod['parameter_dict']['mod_%s' % key] 4351 break 4352 else: 4353 for key in ['aEW', 'AEW', 'aEw', 'aew']: 4354 if key in mod['parameter_dict']: 4355 aewm1 = 1./mod['parameter_dict'][key] 4356 break 4357 elif 'mdl_%s' % key in mod['parameter_dict']: 4358 aewm1 = 1./mod['parameter_dict']['mod_%s' % key] 4359 break 4360 4361 
content += 'ALPHAEW=%s\n' % aewm1 4362 #content += 'PDFSET=%s\n' % self.banner.get_detail('run_card', 'lhaid') 4363 #content += 'PDFSET=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']]) 4364 content += 'TMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 6).value 4365 content += 'TWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 6).value 4366 content += 'ZMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 23).value 4367 content += 'ZWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 23).value 4368 content += 'WMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 24).value 4369 content += 'WWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 24).value 4370 try: 4371 content += 'HGGMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 25).value 4372 content += 'HGGWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 25).value 4373 except KeyError: 4374 content += 'HGGMASS=120.\n' 4375 content += 'HGGWIDTH=0.00575308848\n' 4376 content += 'beammom1=%s\n' % self.banner.get_detail('run_card', 'ebeam1') 4377 content += 'beammom2=%s\n' % self.banner.get_detail('run_card', 'ebeam2') 4378 content += 'BEAM1=%s\n' % self.banner.get_detail('run_card', 'lpp1') 4379 content += 'BEAM2=%s\n' % self.banner.get_detail('run_card', 'lpp2') 4380 content += 'DMASS=%s\n' % mcmass_dict[1] 4381 content += 'UMASS=%s\n' % mcmass_dict[2] 4382 content += 'SMASS=%s\n' % mcmass_dict[3] 4383 content += 'CMASS=%s\n' % mcmass_dict[4] 4384 content += 'BMASS=%s\n' % mcmass_dict[5] 4385 try: 4386 content += 'EMASS=%s\n' % mcmass_dict[11] 4387 content += 'MUMASS=%s\n' % mcmass_dict[13] 4388 content += 'TAUMASS=%s\n' % mcmass_dict[15] 4389 except KeyError: 4390 # this is for backward compatibility 4391 mcmass_lines = [l for l in \ 4392 open(pjoin(self.me_dir, 'SubProcesses', 'MCmasses_%s.inc' % shower.upper()) 4393 ).read().split('\n') if l] 4394 new_mcmass_dict = {} 4395 for l in mcmass_lines: 4396 key, val = l.split('=') 4397 
new_mcmass_dict[key.strip()] = val.replace('d', 'e').strip() 4398 content += 'EMASS=%s\n' % new_mcmass_dict['mcmass(11)'] 4399 content += 'MUMASS=%s\n' % new_mcmass_dict['mcmass(13)'] 4400 content += 'TAUMASS=%s\n' % new_mcmass_dict['mcmass(15)'] 4401 4402 content += 'GMASS=%s\n' % mcmass_dict[21] 4403 content += 'EVENT_NORM=%s\n' % self.banner.get_detail('run_card', 'event_norm').lower() 4404 # check if need to link lhapdf 4405 if int(self.shower_card['pdfcode']) > 1 or \ 4406 (pdlabel=='lhapdf' and int(self.shower_card['pdfcode'])==1) or \ 4407 shower=='HERWIGPP' : 4408 # Use LHAPDF (should be correctly installed, because 4409 # either events were already generated with them, or the 4410 # user explicitly gives an LHAPDF number in the 4411 # shower_card). 4412 self.link_lhapdf(pjoin(self.me_dir, 'lib')) 4413 lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'], 4414 stdout = subprocess.PIPE).stdout.read().strip() 4415 content += 'LHAPDFPATH=%s\n' % lhapdfpath 4416 pdfsetsdir = self.get_lhapdf_pdfsetsdir() 4417 if self.shower_card['pdfcode']==0: 4418 lhaid_list = '' 4419 content += '' 4420 elif self.shower_card['pdfcode']==1: 4421 lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])] 4422 content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']]) 4423 else: 4424 lhaid_list = [abs(int(self.shower_card['pdfcode']))] 4425 content += 'PDFCODE=%s\n' % self.shower_card['pdfcode'] 4426 self.copy_lhapdf_set(lhaid_list, pdfsetsdir) 4427 elif int(self.shower_card['pdfcode'])==1 or \ 4428 int(self.shower_card['pdfcode'])==-1 and True: 4429 # Try to use LHAPDF because user wants to use the same PDF 4430 # as was used for the event generation. However, for the 4431 # event generation, LHAPDF was not used, so non-trivial to 4432 # see if if LHAPDF is available with the corresponding PDF 4433 # set. If not found, give a warning and use build-in PDF 4434 # set instead. 
4435 try: 4436 lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'], 4437 stdout = subprocess.PIPE).stdout.read().strip() 4438 self.link_lhapdf(pjoin(self.me_dir, 'lib')) 4439 content += 'LHAPDFPATH=%s\n' % lhapdfpath 4440 pdfsetsdir = self.get_lhapdf_pdfsetsdir() 4441 lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])] 4442 content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']]) 4443 self.copy_lhapdf_set(lhaid_list, pdfsetsdir) 4444 except Exception: 4445 logger.warning('Trying to shower events using the same PDF in the shower as used in the generation'+\ 4446 ' of the events using LHAPDF. However, no valid LHAPDF installation found with the'+\ 4447 ' needed PDF set. Will use default internal PDF for the shower instead. To use the'+\ 4448 ' same set as was used in the event generation install LHAPDF and set the path using'+\ 4449 ' "set /path_to_lhapdf/bin/lhapdf-config" from the MadGraph5_aMC@NLO python shell') 4450 content += 'LHAPDFPATH=\n' 4451 content += 'PDFCODE=0\n' 4452 else: 4453 content += 'LHAPDFPATH=\n' 4454 content += 'PDFCODE=0\n' 4455 4456 content += 'ICKKW=%s\n' % self.banner.get_detail('run_card', 'ickkw') 4457 content += 'PTJCUT=%s\n' % self.banner.get_detail('run_card', 'ptj') 4458 # add the pythia8/hwpp path(s) 4459 if self.options['pythia8_path']: 4460 content+='PY8PATH=%s\n' % self.options['pythia8_path'] 4461 if self.options['hwpp_path']: 4462 content+='HWPPPATH=%s\n' % self.options['hwpp_path'] 4463 if self.options['thepeg_path'] and self.options['thepeg_path'] != self.options['hwpp_path']: 4464 content+='THEPEGPATH=%s\n' % self.options['thepeg_path'] 4465 if self.options['hepmc_path'] and self.options['hepmc_path'] != self.options['hwpp_path']: 4466 content+='HEPMCPATH=%s\n' % self.options['hepmc_path'] 4467 4468 output = open(pjoin(self.me_dir, 'MCatNLO', 'banner.dat'), 'w') 4469 output.write(content) 4470 output.close() 4471 return shower
4472 4473
4474 - def run_reweight(self, only):
4475 """runs the reweight_xsec_events executables on each sub-event file generated 4476 to compute on the fly scale and/or PDF uncertainities""" 4477 logger.info(' Doing reweight') 4478 4479 nev_unw = pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted') 4480 # if only doing reweight, copy back the nevents_unweighted file 4481 if only: 4482 if os.path.exists(nev_unw + '.orig'): 4483 files.cp(nev_unw + '.orig', nev_unw) 4484 else: 4485 raise aMCatNLOError('Cannot find event file information') 4486 4487 #read the nevents_unweighted file to get the list of event files 4488 file = open(nev_unw) 4489 lines = file.read().split('\n') 4490 file.close() 4491 # make copy of the original nevent_unweighted file 4492 files.cp(nev_unw, nev_unw + '.orig') 4493 # loop over lines (all but the last one whith is empty) and check that the 4494 # number of events is not 0 4495 evt_files = [line.split()[0] for line in lines[:-1] if line.split()[1] != '0'] 4496 evt_wghts = [float(line.split()[3]) for line in lines[:-1] if line.split()[1] != '0'] 4497 if self.run_card['event_norm'].lower()=='bias' and self.run_card['nevents'] != 0: 4498 evt_wghts[:]=[1./float(self.run_card['nevents']) for wgt in evt_wghts] 4499 #prepare the job_dict 4500 job_dict = {} 4501 exe = 'reweight_xsec_events.local' 4502 for i, evt_file in enumerate(evt_files): 4503 path, evt = os.path.split(evt_file) 4504 files.ln(pjoin(self.me_dir, 'SubProcesses', exe), \ 4505 pjoin(self.me_dir, 'SubProcesses', path)) 4506 job_dict[path] = [exe] 4507 4508 self.run_all(job_dict, [[evt, '1']], 'Running reweight') 4509 4510 #check that the new event files are complete 4511 for evt_file in evt_files: 4512 last_line = subprocess.Popen(['tail', '-n1', '%s.rwgt' % \ 4513 pjoin(self.me_dir, 'SubProcesses', evt_file)], \ 4514 stdout = subprocess.PIPE).stdout.read().strip() 4515 if last_line != "</LesHouchesEvents>": 4516 raise aMCatNLOError('An error occurred during reweight. 
Check the' + \ 4517 '\'reweight_xsec_events.output\' files inside the ' + \ 4518 '\'SubProcesses/P*/G*/ directories for details') 4519 4520 #update file name in nevents_unweighted 4521 newfile = open(nev_unw, 'w') 4522 for line in lines: 4523 if line: 4524 newfile.write(line.replace(line.split()[0], line.split()[0] + '.rwgt') + '\n') 4525 newfile.close() 4526 4527 return self.pdf_scale_from_reweighting(evt_files,evt_wghts)
4528
    def pdf_scale_from_reweighting(self, evt_files,evt_wghts):
        """This function takes the files with the scale and pdf values
        written by the reweight_xsec_events.f code
        (P*/G*/pdf_scale_dependence.dat) and computes the overall
        scale and PDF uncertainty (the latter is computed using the
        Hessian method (if lhaid<90000) or Gaussian (if lhaid>90000))
        and returns it in percents.  The expected format of the file
        is: n_scales xsec_scale_central xsec_scale1 ...  n_pdf
        xsec_pdf0 xsec_pdf1 ...."""

        # accumulate, channel by channel, the weighted cross sections for
        # every scale choice and every PDF member
        scales=[]
        pdfs=[]
        for i,evt_file in enumerate(evt_files):
            path, evt=os.path.split(evt_file)
            with open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat'),'r') as f:
                data_line=f.readline()
                if "scale variations:" in data_line:
                    for j,scale in enumerate(self.run_card['dynamical_scale_choice']):
                        # first line is a header, second holds the values
                        # (Fortran 'D' exponents converted to 'E')
                        data_line = f.readline().split()
                        scales_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()]
                        try:
                            # add this channel's contribution to the running sums
                            scales[j] = [a + b for a, b in zip(scales[j], scales_this)]
                        except IndexError:
                            # first channel: initialise the sums
                            scales+=[scales_this]
                    data_line=f.readline()
                if "pdf variations:" in data_line:
                    for j,pdf in enumerate(self.run_card['lhaid']):
                        data_line = f.readline().split()
                        pdfs_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()]
                        try:
                            pdfs[j] = [a + b for a, b in zip(pdfs[j], pdfs_this)]
                        except IndexError:
                            pdfs+=[pdfs_this]

        # get the scale uncertainty in percent
        scale_info=[]
        for j,scale in enumerate(scales):
            # entry 0 is the central scale choice
            s_cen=scale[0]
            if s_cen != 0.0 and self.run_card['reweight_scale'][j]:
                # max and min of the full envelope
                s_max=(max(scale)/s_cen-1)*100
                s_min=(1-min(scale)/s_cen)*100
                # ren and fac scale dependence added in quadrature
                ren_var=[]
                fac_var=[]
                for i in range(len(self.run_card['rw_rscale'])):
                    ren_var.append(scale[i]-s_cen) # central fac scale
                for i in range(len(self.run_card['rw_fscale'])):
                    fac_var.append(scale[i*len(self.run_card['rw_rscale'])]-s_cen) # central ren scale
                s_max_q=((s_cen+math.sqrt(math.pow(max(ren_var),2)+math.pow(max(fac_var),2)))/s_cen-1)*100
                s_min_q=(1-(s_cen-math.sqrt(math.pow(min(ren_var),2)+math.pow(min(fac_var),2)))/s_cen)*100
                s_size=len(scale)
            else:
                # central value vanishes or reweighting disabled: no uncertainty
                s_max=0.0
                s_min=0.0
                s_max_q=0.0
                s_min_q=0.0
                s_size=len(scale)
            scale_info.append({'cen':s_cen, 'min':s_min, 'max':s_max, \
                               'min_q':s_min_q, 'max_q':s_max_q, 'size':s_size, \
                               'label':self.run_card['dynamical_scale_choice'][j], \
                               'unc':self.run_card['reweight_scale'][j]})

        # check if we can use LHAPDF to compute the PDF uncertainty
        if any(self.run_card['reweight_pdf']):
            use_lhapdf=False
            lhapdf_libdir=subprocess.Popen([self.options['lhapdf'],'--libdir'],\
                                           stdout=subprocess.PIPE).stdout.read().strip()
            # look for the python bindings below the LHAPDF lib directory
            try:
                candidates=[dirname for dirname in os.listdir(lhapdf_libdir) \
                            if os.path.isdir(pjoin(lhapdf_libdir,dirname))]
            except OSError:
                candidates=[]
            for candidate in candidates:
                if os.path.isfile(pjoin(lhapdf_libdir,candidate,'site-packages','lhapdf.so')):
                    sys.path.insert(0,pjoin(lhapdf_libdir,candidate,'site-packages'))
                    try:
                        import lhapdf
                        use_lhapdf=True
                        break
                    except ImportError:
                        sys.path.pop(0)
                        continue

            # same search, but for a lib64 layout
            if not use_lhapdf:
                try:
                    candidates=[dirname for dirname in os.listdir(lhapdf_libdir+'64') \
                                if os.path.isdir(pjoin(lhapdf_libdir+'64',dirname))]
                except OSError:
                    candidates=[]
                for candidate in candidates:
                    if os.path.isfile(pjoin(lhapdf_libdir+'64',candidate,'site-packages','lhapdf.so')):
                        sys.path.insert(0,pjoin(lhapdf_libdir+'64',candidate,'site-packages'))
                        try:
                            import lhapdf
                            use_lhapdf=True
                            break
                        except ImportError:
                            sys.path.pop(0)
                            continue

            # last resort: maybe the module is already importable as-is
            if not use_lhapdf:
                try:
                    import lhapdf
                    use_lhapdf=True
                except ImportError:
                    logger.warning("Failed to access python version of LHAPDF: "\
                                   "cannot compute PDF uncertainty from the "\
                                   "weights in the events. The weights in the LHE " \
                                   "event files will still cover all PDF set members, "\
                                   "but there will be no PDF uncertainty printed in the run summary. \n "\
                                   "If the python interface to LHAPDF is available on your system, try "\
                                   "adding its location to the PYTHONPATH environment variable and the"\
                                   "LHAPDF library location to LD_LIBRARY_PATH (linux) or DYLD_LIBRARY_PATH (mac os x).")
                    use_lhapdf=False

        # turn off lhapdf printing any messages
        if any(self.run_card['reweight_pdf']) and use_lhapdf: lhapdf.setVerbosity(0)

        pdf_info=[]
        for j,pdfset in enumerate(pdfs):
            # member 0 is the central PDF
            p_cen=pdfset[0]
            if p_cen != 0.0 and self.run_card['reweight_pdf'][j]:
                if use_lhapdf:
                    pdfsetname=self.run_card['lhapdfsetname'][j]
                    try:
                        # let LHAPDF combine the members into an uncertainty
                        p=lhapdf.getPDFSet(pdfsetname)
                        ep=p.uncertainty(pdfset,-1)
                        p_cen=ep.central
                        p_min=abs(ep.errminus/p_cen)*100
                        p_max=abs(ep.errplus/p_cen)*100
                        p_type=p.errorType
                        p_size=p.size
                        p_conf=p.errorConfLevel
                    except:
                        logger.warning("Could not access LHAPDF to compute uncertainties for %s" % pdfsetname)
                        p_min=0.0
                        p_max=0.0
                        p_type='unknown'
                        p_conf='unknown'
                        p_size=len(pdfset)
                else:
                    p_min=0.0
                    p_max=0.0
                    p_type='unknown'
                    p_conf='unknown'
                    p_size=len(pdfset)
                    pdfsetname=self.run_card['lhaid'][j]
            else:
                p_min=0.0
                p_max=0.0
                p_type='none'
                p_conf='unknown'
                p_size=len(pdfset)
                pdfsetname=self.run_card['lhaid'][j]
            pdf_info.append({'cen':p_cen, 'min':p_min, 'max':p_max, \
                             'unc':p_type, 'name':pdfsetname, 'size':p_size, \
                             'label':self.run_card['lhaid'][j], 'conf':p_conf})

        scale_pdf_info=[scale_info,pdf_info]
        return scale_pdf_info
4691 4692
4693 - def wait_for_complete(self, run_type):
4694 """this function waits for jobs on cluster to complete their run.""" 4695 starttime = time.time() 4696 #logger.info(' Waiting for submitted jobs to complete') 4697 update_status = lambda i, r, f: self.update_status((i, r, f, run_type), 4698 starttime=starttime, level='parton', update_results=True) 4699 try: 4700 self.cluster.wait(self.me_dir, update_status) 4701 except: 4702 self.cluster.remove() 4703 raise
4704
4705 - def run_all(self, job_dict, arg_list, run_type='monitor', split_jobs = False):
4706 """runs the jobs in job_dict (organized as folder: [job_list]), with arguments args""" 4707 self.ijob = 0 4708 if run_type != 'shower': 4709 self.njobs = sum(len(jobs) for jobs in job_dict.values()) * len(arg_list) 4710 for args in arg_list: 4711 for Pdir, jobs in job_dict.items(): 4712 for job in jobs: 4713 self.run_exe(job, args, run_type, cwd=pjoin(self.me_dir, 'SubProcesses', Pdir) ) 4714 if self.cluster_mode == 2: 4715 time.sleep(1) # security to allow all jobs to be launched 4716 else: 4717 self.njobs = len(arg_list) 4718 for args in arg_list: 4719 [(cwd, exe)] = job_dict.items() 4720 self.run_exe(exe, args, run_type, cwd) 4721 4722 self.wait_for_complete(run_type)
4723 4724 4725
4726 - def check_event_files(self,jobs):
4727 """check the integrity of the event files after splitting, and resubmit 4728 those which are not nicely terminated""" 4729 jobs_to_resubmit = [] 4730 for job in jobs: 4731 last_line = '' 4732 try: 4733 last_line = subprocess.Popen( 4734 ['tail', '-n1', pjoin(job['dirname'], 'events.lhe')], \ 4735 stdout = subprocess.PIPE).stdout.read().strip() 4736 except IOError: 4737 pass 4738 if last_line != "</LesHouchesEvents>": 4739 jobs_to_resubmit.append(job) 4740 self.njobs = 0 4741 if jobs_to_resubmit: 4742 run_type = 'Resubmitting broken jobs' 4743 logger.info('Some event files are broken, corresponding jobs will be resubmitted.') 4744 for job in jobs_to_resubmit: 4745 logger.debug('Resubmitting ' + job['dirname'] + '\n') 4746 self.run_all_jobs(jobs_to_resubmit,2,fixed_order=False)
4747 4748
4749 - def find_jobs_to_split(self, pdir, job, arg):
4750 """looks into the nevents_unweighed_splitted file to check how many 4751 split jobs are needed for this (pdir, job). arg is F, B or V""" 4752 # find the number of the integration channel 4753 splittings = [] 4754 ajob = open(pjoin(self.me_dir, 'SubProcesses', pdir, job)).read() 4755 pattern = re.compile('for i in (\d+) ; do') 4756 match = re.search(pattern, ajob) 4757 channel = match.groups()[0] 4758 # then open the nevents_unweighted_splitted file and look for the 4759 # number of splittings to be done 4760 nevents_file = open(pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted_splitted')).read() 4761 # This skips the channels with zero events, because they are 4762 # not of the form GFXX_YY, but simply GFXX 4763 pattern = re.compile(r"%s_(\d+)/events.lhe" % \ 4764 pjoin(pdir, 'G%s%s' % (arg,channel))) 4765 matches = re.findall(pattern, nevents_file) 4766 for m in matches: 4767 splittings.append(m) 4768 return splittings
4769 4770
4771 - def run_exe(self, exe, args, run_type, cwd=None):
4772 """this basic function launch locally/on cluster exe with args as argument. 4773 """ 4774 # first test that exe exists: 4775 execpath = None 4776 if cwd and os.path.exists(pjoin(cwd, exe)): 4777 execpath = pjoin(cwd, exe) 4778 elif not cwd and os.path.exists(exe): 4779 execpath = exe 4780 else: 4781 raise aMCatNLOError('Cannot find executable %s in %s' \ 4782 % (exe, os.getcwd())) 4783 # check that the executable has exec permissions 4784 if self.cluster_mode == 1 and not os.access(execpath, os.X_OK): 4785 subprocess.call(['chmod', '+x', exe], cwd=cwd) 4786 # finally run it 4787 if self.cluster_mode == 0: 4788 #this is for the serial run 4789 misc.call(['./'+exe] + args, cwd=cwd) 4790 self.ijob += 1 4791 self.update_status((max([self.njobs - self.ijob - 1, 0]), 4792 min([1, self.njobs - self.ijob]), 4793 self.ijob, run_type), level='parton') 4794 4795 #this is for the cluster/multicore run 4796 elif 'reweight' in exe: 4797 # a reweight run 4798 # Find the correct PDF input file 4799 input_files, output_files = [], [] 4800 pdfinput = self.get_pdf_input_filename() 4801 if os.path.exists(pdfinput): 4802 input_files.append(pdfinput) 4803 input_files.append(pjoin(os.path.dirname(exe), os.path.pardir, 'reweight_xsec_events')) 4804 input_files.append(pjoin(cwd, os.path.pardir, 'leshouche_info.dat')) 4805 input_files.append(args[0]) 4806 output_files.append('%s.rwgt' % os.path.basename(args[0])) 4807 output_files.append('reweight_xsec_events.output') 4808 output_files.append('scale_pdf_dependence.dat') 4809 4810 return self.cluster.submit2(exe, args, cwd=cwd, 4811 input_files=input_files, output_files=output_files, 4812 required_output=output_files) 4813 4814 elif 'ajob' in exe: 4815 # the 'standard' amcatnlo job 4816 # check if args is a list of string 4817 if type(args[0]) == str: 4818 input_files, output_files, required_output, args = self.getIO_ajob(exe,cwd,args) 4819 #submitting 4820 self.cluster.submit2(exe, args, cwd=cwd, 4821 input_files=input_files, 
output_files=output_files, 4822 required_output=required_output) 4823 4824 # # keep track of folders and arguments for splitted evt gen 4825 # subfolder=output_files[-1].split('/')[0] 4826 # if len(args) == 4 and '_' in subfolder: 4827 # self.split_folders[pjoin(cwd,subfolder)] = [exe] + args 4828 4829 elif 'shower' in exe: 4830 # a shower job 4831 # args are [shower, output(HEP or TOP), run_name] 4832 # cwd is the shower rundir, where the executable are found 4833 input_files, output_files = [], [] 4834 shower = args[0] 4835 # the input files 4836 if shower == 'PYTHIA8': 4837 input_files.append(pjoin(cwd, 'Pythia8.exe')) 4838 input_files.append(pjoin(cwd, 'Pythia8.cmd')) 4839 if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 4840 input_files.append(pjoin(cwd, 'config.sh')) 4841 input_files.append(pjoin(self.options['pythia8_path'], 'xmldoc')) 4842 else: 4843 input_files.append(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc')) 4844 else: 4845 input_files.append(pjoin(cwd, 'MCATNLO_%s_EXE' % shower)) 4846 input_files.append(pjoin(cwd, 'MCATNLO_%s_input' % shower)) 4847 if shower == 'HERWIGPP': 4848 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')): 4849 input_files.append(pjoin(cwd, 'Herwig++')) 4850 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')): 4851 input_files.append(pjoin(cwd, 'Herwig')) 4852 input_files.append(pjoin(cwd, 'HepMCFortran.so')) 4853 if len(args) == 3: 4854 if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')): 4855 input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')) 4856 elif os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')): 4857 input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')) 4858 else: 4859 raise aMCatNLOError, 'Event file not present in %s' % \ 4860 pjoin(self.me_dir, 'Events', self.run_name) 4861 else: 4862 input_files.append(pjoin(cwd, 'events_%s.lhe' % args[3])) 
4863 # the output files 4864 if len(args) == 3: 4865 output_files.append('mcatnlo_run.log') 4866 else: 4867 output_files.append('mcatnlo_run_%s.log' % args[3]) 4868 if args[1] == 'HEP': 4869 if len(args) == 3: 4870 fname = 'events' 4871 else: 4872 fname = 'events_%s' % args[3] 4873 if shower in ['PYTHIA8', 'HERWIGPP']: 4874 output_files.append(fname + '.hepmc.gz') 4875 else: 4876 output_files.append(fname + '.hep.gz') 4877 elif args[1] == 'TOP' or args[1] == 'HWU': 4878 if len(args) == 3: 4879 fname = 'histfile' 4880 else: 4881 fname = 'histfile_%s' % args[3] 4882 output_files.append(fname + '.tar') 4883 else: 4884 raise aMCatNLOError, 'Not a valid output argument for shower job : %d' % args[1] 4885 #submitting 4886 self.cluster.submit2(exe, args, cwd=cwd, 4887 input_files=input_files, output_files=output_files) 4888 4889 else: 4890 return self.cluster.submit(exe, args, cwd=cwd)
4891
    def getIO_ajob(self,exe,cwd, args):
        """Build the (input_files, output_files, required_output, args)
        staging lists for an 'ajob' submission, so the cluster can run the
        job on local disk.  args[1] selects the mode ('born'/'all' for the
        fixed-order madevent_mintFO run, 'F'/'B' for the mintMC run); args[0]
        is the channel, args[2] the split index and args[3] the MINT step."""
        # use local disk if possible => need to understand what the
        # input/output files are

        output_files = []
        required_output = []
        input_files = [pjoin(self.me_dir, 'SubProcesses', 'randinit'),
                       pjoin(cwd, 'symfact.dat'),
                       pjoin(cwd, 'iproc.dat'),
                       pjoin(cwd, 'initial_states_map.dat'),
                       pjoin(cwd, 'configs_and_props_info.dat'),
                       pjoin(cwd, 'leshouche_info.dat'),
                       pjoin(cwd, 'FKS_params.dat')]

        # For GoSam interface, we must copy the SLHA card as well
        if os.path.exists(pjoin(self.me_dir,'OLP_virtuals','gosam.rc')):
            input_files.append(pjoin(self.me_dir, 'Cards', 'param_card.dat'))

        if os.path.exists(pjoin(cwd,'nevents.tar')):
            input_files.append(pjoin(cwd,'nevents.tar'))

        if os.path.exists(pjoin(self.me_dir,'SubProcesses','OLE_order.olc')):
            input_files.append(pjoin(cwd, 'OLE_order.olc'))

        # File for the loop (might not be present if MadLoop is not used)
        if os.path.exists(pjoin(cwd,'MadLoop5_resources.tar.gz')) and \
                            cluster.need_transfer(self.options):
            input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))
        elif os.path.exists(pjoin(cwd,'MadLoop5_resources')) and \
                            cluster.need_transfer(self.options):
            # pack the resources directory once so it can be shipped
            tf=tarfile.open(pjoin(cwd,'MadLoop5_resources.tar.gz'),'w:gz',
                            dereference=True)
            tf.add(pjoin(cwd,'MadLoop5_resources'),arcname='MadLoop5_resources')
            tf.close()
            input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))

        if args[1] == 'born' or args[1] == 'all':
            # MADEVENT MINT FO MODE
            input_files.append(pjoin(cwd, 'madevent_mintFO'))
            if args[2] == '0':
                current = '%s_G%s' % (args[1],args[0])
            else:
                current = '%s_G%s_%s' % (args[1],args[0],args[2])
            if os.path.exists(pjoin(cwd,current)):
                input_files.append(pjoin(cwd, current))
            output_files.append(current)

            required_output.append('%s/results.dat' % current)
            required_output.append('%s/res_%s.dat' % (current,args[3]))
            required_output.append('%s/log_MINT%s.txt' % (current,args[3]))
            required_output.append('%s/mint_grids' % current)
            required_output.append('%s/grid.MC_integer' % current)
            if args[3] != '0':
                required_output.append('%s/scale_pdf_dependence.dat' % current)

        elif args[1] == 'F' or args[1] == 'B':
            # MINTMC MODE
            input_files.append(pjoin(cwd, 'madevent_mintMC'))

            if args[2] == '0':
                current = 'G%s%s' % (args[1],args[0])
            else:
                current = 'G%s%s_%s' % (args[1],args[0],args[2])
            if os.path.exists(pjoin(cwd,current)):
                input_files.append(pjoin(cwd, current))
            output_files.append(current)
            if args[2] > '0':
                # this is for the split event generation
                output_files.append('G%s%s_%s' % (args[1], args[0], args[2]))
                required_output.append('G%s%s_%s/log_MINT%s.txt' % (args[1],args[0],args[2],args[3]))

            else:
                required_output.append('%s/log_MINT%s.txt' % (current,args[3]))
                if args[3] in ['0','1']:
                    required_output.append('%s/results.dat' % current)
                if args[3] == '1':
                    output_files.append('%s/results.dat' % current)

        else:
            raise aMCatNLOError, 'not valid arguments: %s' %(', '.join(args))

        #Find the correct PDF input file
        pdfinput = self.get_pdf_input_filename()
        if os.path.exists(pdfinput):
            input_files.append(pdfinput)
        return input_files, output_files, required_output, args
4978 4979
4980 - def compile(self, mode, options):
4981 """compiles aMC@NLO to compute either NLO or NLO matched to shower, as 4982 specified in mode""" 4983 4984 os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) 4985 4986 self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, 4987 '%s_%s_banner.txt' % (self.run_name, self.run_tag))) 4988 4989 self.get_characteristics(pjoin(self.me_dir, 4990 'SubProcesses', 'proc_characteristics')) 4991 4992 #define a bunch of log files 4993 amcatnlo_log = pjoin(self.me_dir, 'compile_amcatnlo.log') 4994 madloop_log = pjoin(self.me_dir, 'compile_madloop.log') 4995 reweight_log = pjoin(self.me_dir, 'compile_reweight.log') 4996 test_log = pjoin(self.me_dir, 'test.log') 4997 4998 # environmental variables to be included in make_opts 4999 self.make_opts_var = {} 5000 if self.proc_characteristics['has_loops'] and \ 5001 not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')): 5002 self.make_opts_var['madloop'] = 'true' 5003 5004 self.update_status('Compiling the code', level=None, update_results=True) 5005 5006 libdir = pjoin(self.me_dir, 'lib') 5007 sourcedir = pjoin(self.me_dir, 'Source') 5008 5009 #clean files 5010 files.rm([amcatnlo_log, madloop_log, reweight_log, test_log]) 5011 #define which executable/tests to compile 5012 if '+' in mode: 5013 mode = mode.split('+')[0] 5014 if mode in ['NLO', 'LO']: 5015 exe = 'madevent_mintFO' 5016 tests = ['test_ME'] 5017 self.analyse_card.write_card(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts')) 5018 elif mode in ['aMC@NLO', 'aMC@LO','noshower','noshowerLO']: 5019 exe = 'madevent_mintMC' 5020 tests = ['test_ME', 'test_MC'] 5021 # write an analyse_opts with a dummy analysis so that compilation goes through 5022 with open(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts'),'w') as fsock: 5023 fsock.write('FO_ANALYSE=analysis_dummy.o dbook.o open_output_files_dummy.o HwU_dummy.o\n') 5024 5025 #directory where to compile exe 5026 p_dirs = [d for d in \ 5027 open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d] 
5028 # create param_card.inc and run_card.inc 5029 self.do_treatcards('', amcatnlo=True, mode=mode) 5030 # if --nocompile option is specified, check here that all exes exists. 5031 # If they exists, return 5032 if all([os.path.exists(pjoin(self.me_dir, 'SubProcesses', p_dir, exe)) \ 5033 for p_dir in p_dirs]) and options['nocompile']: 5034 return 5035 5036 # rm links to lhapdflib/ PDFsets if exist 5037 if os.path.exists(pjoin(libdir, 'PDFsets')): 5038 files.rm(pjoin(libdir, 'PDFsets')) 5039 5040 # read the run_card to find if lhapdf is used or not 5041 if self.run_card['pdlabel'] == 'lhapdf' and \ 5042 (self.banner.get_detail('run_card', 'lpp1') != 0 or \ 5043 self.banner.get_detail('run_card', 'lpp2') != 0): 5044 5045 self.link_lhapdf(libdir, [pjoin('SubProcesses', p) for p in p_dirs]) 5046 pdfsetsdir = self.get_lhapdf_pdfsetsdir() 5047 lhaid_list = self.run_card['lhaid'] 5048 self.copy_lhapdf_set(lhaid_list, pdfsetsdir) 5049 5050 else: 5051 if self.run_card['lpp1'] == 1 == self.run_card['lpp2']: 5052 logger.info('Using built-in libraries for PDFs') 5053 5054 self.make_opts_var['lhapdf'] = "" 5055 5056 # read the run_card to find if applgrid is used or not 5057 if self.run_card['iappl'] != 0: 5058 self.make_opts_var['applgrid'] = 'True' 5059 # check versions of applgrid and amcfast 5060 for code in ['applgrid','amcfast']: 5061 try: 5062 p = subprocess.Popen([self.options[code], '--version'], \ 5063 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 5064 except OSError: 5065 raise aMCatNLOError(('No valid %s installation found. \n' + \ 5066 'Please set the path to %s-config by using \n' + \ 5067 'MG5_aMC> set <absolute-path-to-%s>/bin/%s-config \n') % (code,code,code,code)) 5068 else: 5069 output, _ = p.communicate() 5070 if code is 'applgrid' and output < '1.4.63': 5071 raise aMCatNLOError('Version of APPLgrid is too old. 
Use 1.4.69 or later.'\ 5072 +' You are using %s',output) 5073 if code is 'amcfast' and output < '1.1.1': 5074 raise aMCatNLOError('Version of aMCfast is too old. Use 1.1.1 or later.'\ 5075 +' You are using %s',output) 5076 5077 # set-up the Source/make_opts with the correct applgrid-config file 5078 appllibs=" APPLLIBS=$(shell %s --ldflags) $(shell %s --ldcflags) \n" \ 5079 % (self.options['amcfast'],self.options['applgrid']) 5080 text=open(pjoin(self.me_dir,'Source','make_opts'),'r').readlines() 5081 text_out=[] 5082 for line in text: 5083 if line.strip().startswith('APPLLIBS=$'): 5084 line=appllibs 5085 text_out.append(line) 5086 with open(pjoin(self.me_dir,'Source','make_opts'),'w') as fsock: 5087 fsock.writelines(text_out) 5088 else: 5089 self.make_opts_var['applgrid'] = "" 5090 5091 if 'fastjet' in self.options.keys() and self.options['fastjet']: 5092 self.make_opts_var['fastjet_config'] = self.options['fastjet'] 5093 5094 # add the make_opts_var to make_opts 5095 self.update_make_opts() 5096 5097 # make Source 5098 self.update_status('Compiling source...', level=None) 5099 misc.compile(['clean4pdf'], cwd = sourcedir) 5100 misc.compile(cwd = sourcedir) 5101 if os.path.exists(pjoin(libdir, 'libdhelas.a')) \ 5102 and os.path.exists(pjoin(libdir, 'libgeneric.a')) \ 5103 and os.path.exists(pjoin(libdir, 'libmodel.a')) \ 5104 and os.path.exists(pjoin(libdir, 'libpdf.a')): 5105 logger.info(' ...done, continuing with P* directories') 5106 else: 5107 raise aMCatNLOError('Compilation failed') 5108 5109 # make StdHep (only necessary with MG option output_dependencies='internal') 5110 MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib') 5111 if not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))) or \ 5112 not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libFmcfio.a'))): 5113 if os.path.exists(pjoin(sourcedir,'StdHEP')): 5114 logger.info('Compiling StdHEP (can take a couple of minutes) ...') 5115 try: 5116 misc.compile(['StdHEP'], cwd = 
sourcedir) 5117 except Exception as error: 5118 logger.debug(str(error)) 5119 logger.warning("StdHep failed to compiled. This forbids to run NLO+PS with PY6 and Herwig6") 5120 logger.info("details on the compilation error are available if the code is run with --debug flag") 5121 else: 5122 logger.info(' ...done.') 5123 else: 5124 logger.warning('Could not compile StdHEP because its'+\ 5125 ' source directory could not be found in the SOURCE folder.\n'+\ 5126 " Check the MG5_aMC option 'output_dependencies'.\n"+\ 5127 " This will prevent the use of HERWIG6/Pythia6 shower.") 5128 5129 5130 # make CutTools (only necessary with MG option output_dependencies='internal') 5131 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 5132 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 5133 if os.path.exists(pjoin(sourcedir,'CutTools')): 5134 logger.info('Compiling CutTools (can take a couple of minutes) ...') 5135 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 5136 logger.info(' ...done.') 5137 else: 5138 raise aMCatNLOError('Could not compile CutTools because its'+\ 5139 ' source directory could not be found in the SOURCE folder.\n'+\ 5140 " Check the MG5_aMC option 'output_dependencies.'") 5141 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 5142 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 5143 raise aMCatNLOError('CutTools compilation failed.') 5144 5145 # Verify compatibility between current compiler and the one which was 5146 # used when last compiling CutTools (if specified). 
5147 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 5148 libdir, 'libcts.a')))),'compiler_version.log') 5149 if os.path.exists(compiler_log_path): 5150 compiler_version_used = open(compiler_log_path,'r').read() 5151 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 5152 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 5153 if os.path.exists(pjoin(sourcedir,'CutTools')): 5154 logger.info('CutTools was compiled with a different fortran'+\ 5155 ' compiler. Re-compiling it now...') 5156 misc.compile(['cleanCT'], cwd = sourcedir) 5157 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 5158 logger.info(' ...done.') 5159 else: 5160 raise aMCatNLOError("CutTools installation in %s"\ 5161 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 5162 " seems to have been compiled with a different compiler than"+\ 5163 " the one specified in MG5_aMC. Please recompile CutTools.") 5164 5165 # make IREGI (only necessary with MG option output_dependencies='internal') 5166 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libiregi.a'))) \ 5167 and os.path.exists(pjoin(sourcedir,'IREGI')): 5168 logger.info('Compiling IREGI (can take a couple of minutes) ...') 5169 misc.compile(['IREGI'], cwd = sourcedir) 5170 logger.info(' ...done.') 5171 5172 if os.path.exists(pjoin(libdir, 'libiregi.a')): 5173 # Verify compatibility between current compiler and the one which was 5174 # used when last compiling IREGI (if specified). 5175 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 5176 libdir, 'libiregi.a')))),'compiler_version.log') 5177 if os.path.exists(compiler_log_path): 5178 compiler_version_used = open(compiler_log_path,'r').read() 5179 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 5180 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 5181 if os.path.exists(pjoin(sourcedir,'IREGI')): 5182 logger.info('IREGI was compiled with a different fortran'+\ 5183 ' compiler. 
Re-compiling it now...') 5184 misc.compile(['cleanIR'], cwd = sourcedir) 5185 misc.compile(['IREGI'], cwd = sourcedir) 5186 logger.info(' ...done.') 5187 else: 5188 raise aMCatNLOError("IREGI installation in %s"\ 5189 %os.path.realpath(pjoin(libdir, 'libiregi.a'))+\ 5190 " seems to have been compiled with a different compiler than"+\ 5191 " the one specified in MG5_aMC. Please recompile IREGI.") 5192 5193 # check if MadLoop virtuals have been generated 5194 if self.proc_characteristics['has_loops'] and \ 5195 not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')): 5196 if mode in ['NLO', 'aMC@NLO', 'noshower']: 5197 tests.append('check_poles') 5198 5199 # make and run tests (if asked for), gensym and make madevent in each dir 5200 self.update_status('Compiling directories...', level=None) 5201 5202 for test in tests: 5203 self.write_test_input(test) 5204 5205 try: 5206 import multiprocessing 5207 if not self.nb_core: 5208 try: 5209 self.nb_core = int(self.options['nb_core']) 5210 except TypeError: 5211 self.nb_core = multiprocessing.cpu_count() 5212 except ImportError: 5213 self.nb_core = 1 5214 5215 compile_options = copy.copy(self.options) 5216 compile_options['nb_core'] = self.nb_core 5217 compile_cluster = cluster.MultiCore(**compile_options) 5218 logger.info('Compiling on %d cores' % self.nb_core) 5219 5220 update_status = lambda i, r, f: self.donothing(i,r,f) 5221 for p_dir in p_dirs: 5222 compile_cluster.submit(prog = compile_dir, 5223 argument = [self.me_dir, p_dir, mode, options, 5224 tests, exe, self.options['run_mode']]) 5225 try: 5226 compile_cluster.wait(self.me_dir, update_status) 5227 except Exception, error: 5228 logger.warning("Fail to compile the Subprocesses") 5229 if __debug__: 5230 raise 5231 compile_cluster.remove() 5232 self.do_quit('') 5233 5234 logger.info('Checking test output:') 5235 for p_dir in p_dirs: 5236 logger.info(p_dir) 5237 for test in tests: 5238 logger.info(' Result for %s:' % test) 5239 5240 this_dir = pjoin(self.me_dir, 
'SubProcesses', p_dir) 5241 #check that none of the tests failed 5242 self.check_tests(test, this_dir)
5243 5244
    def donothing(*args):
        """No-op status callback.

        Used as the ``update_status`` hook handed to ``cluster.wait`` while
        the SubProcesses directories are compiled, so that per-job progress
        does not overwrite the interface status line.  Accepts and discards
        any arguments (``self`` is absorbed into ``*args`` when this is
        invoked as a bound method).
        """
        pass
5249 - def check_tests(self, test, dir):
5250 """just call the correct parser for the test log. 5251 Skip check_poles for LOonly folders""" 5252 if test in ['test_ME', 'test_MC']: 5253 return self.parse_test_mx_log(pjoin(dir, '%s.log' % test)) 5254 elif test == 'check_poles' and not os.path.exists(pjoin(dir,'parton_lum_0.f')): 5255 return self.parse_check_poles_log(pjoin(dir, '%s.log' % test))
5256 5257
5258 - def parse_test_mx_log(self, log):
5259 """read and parse the test_ME/MC.log file""" 5260 content = open(log).read() 5261 if 'FAILED' in content: 5262 logger.info('Output of the failing test:\n'+content[:-1],'$MG:BOLD') 5263 raise aMCatNLOError('Some tests failed, run cannot continue.\n' + \ 5264 'Please check that widths of final state particles (e.g. top) have been' + \ 5265 ' set to 0 in the param_card.dat.') 5266 else: 5267 lines = [l for l in content.split('\n') if 'PASSED' in l] 5268 logger.info(' Passed.') 5269 logger.debug('\n'+'\n'.join(lines))
5270 5271
5272 - def parse_check_poles_log(self, log):
5273 """reads and parse the check_poles.log file""" 5274 content = open(log).read() 5275 npass = 0 5276 nfail = 0 5277 for line in content.split('\n'): 5278 if 'PASSED' in line: 5279 npass +=1 5280 tolerance = float(line.split()[1]) 5281 if 'FAILED' in line: 5282 nfail +=1 5283 tolerance = float(line.split()[1]) 5284 5285 if nfail + npass == 0: 5286 logger.warning('0 points have been tried') 5287 return 5288 5289 if float(nfail)/float(nfail+npass) > 0.1: 5290 raise aMCatNLOError('Poles do not cancel, run cannot continue') 5291 else: 5292 logger.info(' Poles successfully cancel for %d points over %d (tolerance=%2.1e)' \ 5293 %(npass, nfail+npass, tolerance))
5294 5295
5296 - def write_test_input(self, test):
5297 """write the input files to run test_ME/MC or check_poles""" 5298 if test in ['test_ME', 'test_MC']: 5299 content = "-2 -2\n" #generate randomly energy/angle 5300 content+= "100 100\n" #run 100 points for soft and collinear tests 5301 content+= "0\n" #all FKS configs 5302 content+= '\n'.join(["-1"] * 50) #random diagram (=first diagram) 5303 elif test == 'check_poles': 5304 content = '20 \n -1\n' 5305 5306 file = open(pjoin(self.me_dir, '%s_input.txt' % test), 'w') 5307 if test == 'test_MC': 5308 shower = self.run_card['parton_shower'] 5309 header = "1 \n %s\n 1 -0.1\n-1 -0.1\n" % shower 5310 file.write(header + content) 5311 elif test == 'test_ME': 5312 header = "2 \n" 5313 file.write(header + content) 5314 else: 5315 file.write(content) 5316 file.close()
    # Question class presenting the run-mode switches (order, shower,
    # madspin, reweight, ...) at launch time; consumed through
    # self.action_switcher by ask_run_configuration.
    action_switcher = AskRunNLO
    ############################################################################
5321 - def ask_run_configuration(self, mode, options, switch={}):
5322 """Ask the question when launching generate_events/multi_run""" 5323 5324 if 'parton' not in options: 5325 options['parton'] = False 5326 if 'reweightonly' not in options: 5327 options['reweightonly'] = False 5328 5329 if mode == 'auto': 5330 mode = None 5331 if not mode and (options['parton'] or options['reweightonly']): 5332 mode = 'noshower' 5333 5334 passing_cmd = [] 5335 for key,value in switch.keys(): 5336 passing_cmd.append('%s=%s' % (key,value)) 5337 5338 if 'do_reweight' in options and options['do_reweight']: 5339 passing_cmd.append('reweight=ON') 5340 if 'do_madspin' in options and options['do_madspin']: 5341 passing_cmd.append('madspin=ON') 5342 5343 force = self.force 5344 if mode == 'onlyshower': 5345 passing_cmd.append('onlyshower') 5346 force = True 5347 elif mode: 5348 passing_cmd.append(mode) 5349 5350 switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, 5351 mode=mode, force=force, 5352 first_cmd=passing_cmd, 5353 return_instance=True) 5354 5355 if 'mode' in switch: 5356 mode = switch['mode'] 5357 5358 #assign the mode depending of the switch 5359 if not mode or mode == 'auto': 5360 if switch['order'] == 'LO': 5361 if switch['runshower']: 5362 mode = 'aMC@LO' 5363 elif switch['fixed_order'] == 'ON': 5364 mode = 'LO' 5365 else: 5366 mode = 'noshowerLO' 5367 elif switch['order'] == 'NLO': 5368 if switch['runshower']: 5369 mode = 'aMC@NLO' 5370 elif switch['fixed_order'] == 'ON': 5371 mode = 'NLO' 5372 else: 5373 mode = 'noshower' 5374 logger.info('will run in mode: %s' % mode) 5375 5376 if mode == 'noshower': 5377 if switch['shower'] == 'OFF': 5378 logger.warning("""You have chosen not to run a parton shower. 5379 NLO events without showering are NOT physical. 5380 Please, shower the LesHouches events before using them for physics analyses. 5381 You have to choose NOW which parton-shower you WILL use and specify it in the run_card.""") 5382 else: 5383 logger.info("""Your Parton-shower choice is not available for running. 
5384 The events will be generated for the associated Parton-Shower. 5385 Remember that NLO events without showering are NOT physical.""", '$MG:BOLD') 5386 5387 5388 # specify the cards which are needed for this run. 5389 cards = ['param_card.dat', 'run_card.dat'] 5390 ignore = [] 5391 if mode in ['LO', 'NLO']: 5392 options['parton'] = True 5393 ignore = ['shower_card.dat', 'madspin_card.dat'] 5394 cards.append('FO_analyse_card.dat') 5395 else: 5396 if switch['madspin'] != 'OFF': 5397 cards.append('madspin_card.dat') 5398 if switch['reweight'] != 'OFF': 5399 cards.append('reweight_card.dat') 5400 if switch['madanalysis'] in ['HADRON', 'ON']: 5401 cards.append('madanalysis5_hadron_card.dat') 5402 if 'aMC@' in mode: 5403 cards.append('shower_card.dat') 5404 if mode == 'onlyshower': 5405 cards = ['shower_card.dat'] 5406 if options['reweightonly']: 5407 cards = ['run_card.dat'] 5408 5409 self.keep_cards(cards, ignore) 5410 5411 if mode =='onlyshower': 5412 cards = ['shower_card.dat'] 5413 5414 5415 # automatically switch to keep_wgt option 5416 first_cmd = cmd_switch.get_cardcmd() 5417 5418 if not options['force'] and not self.force: 5419 self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) 5420 5421 self.banner = banner_mod.Banner() 5422 5423 # store the cards in the banner 5424 for card in cards: 5425 self.banner.add(pjoin(self.me_dir, 'Cards', card)) 5426 # and the run settings 5427 run_settings = '\n'.join(['%s = %s' % (k, v) for (k, v) in switch.items()]) 5428 self.banner.add_text('run_settings', run_settings) 5429 5430 if not mode =='onlyshower': 5431 self.run_card = self.banner.charge_card('run_card') 5432 self.run_tag = self.run_card['run_tag'] 5433 #this is if the user did not provide a name for the current run 5434 if not hasattr(self, 'run_name') or not self.run_name: 5435 self.run_name = self.find_available_run_name(self.me_dir) 5436 #add a tag in the run_name for distinguish run_type 5437 if self.run_name.startswith('run_'): 5438 if mode in 
['LO','aMC@LO','noshowerLO']: 5439 self.run_name += '_LO' 5440 self.set_run_name(self.run_name, self.run_tag, 'parton') 5441 if self.run_card['ickkw'] == 3 and mode in ['LO', 'aMC@LO', 'noshowerLO']: 5442 raise self.InvalidCmd("""FxFx merging (ickkw=3) not allowed at LO""") 5443 elif self.run_card['ickkw'] == 3 and mode in ['aMC@NLO', 'noshower']: 5444 logger.warning("""You are running with FxFx merging enabled. To be able to merge 5445 samples of various multiplicities without double counting, you 5446 have to remove some events after showering 'by hand'. Please 5447 read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""") 5448 if self.run_card['parton_shower'].upper() == 'PYTHIA6Q': 5449 raise self.InvalidCmd("""FxFx merging does not work with Q-squared ordered showers.""") 5450 elif self.run_card['parton_shower'].upper() != 'HERWIG6' and self.run_card['parton_shower'].upper() != 'PYTHIA8' and self.run_card['parton_shower'].upper() != 'HERWIGPP': 5451 question="FxFx merging not tested for %s shower. Do you want to continue?\n" % self.run_card['parton_shower'] + \ 5452 "Type \'n\' to stop or \'y\' to continue" 5453 answers = ['n','y'] 5454 answer = self.ask(question, 'n', answers) 5455 if answer == 'n': 5456 error = '''Stop opertation''' 5457 self.ask_run_configuration(mode, options) 5458 # raise aMCatNLOError(error) 5459 elif self.run_card['ickkw'] == -1 and mode in ['aMC@NLO', 'noshower']: 5460 # NNLL+NLO jet-veto only possible for LO event generation or fNLO runs. 5461 raise self.InvalidCmd("""NNLL+NLO jet veto runs (ickkw=-1) only possible for fNLO or LO.""") 5462 if 'aMC@' in mode or mode == 'onlyshower': 5463 self.shower_card = self.banner.charge_card('shower_card') 5464 5465 elif mode in ['LO', 'NLO']: 5466 analyse_card_path = pjoin(self.me_dir, 'Cards','FO_analyse_card.dat') 5467 self.analyse_card = self.banner.charge_card('FO_analyse_card') 5468 5469 return mode
5470
#===============================================================================
# aMCatNLOCmd
#===============================================================================
class aMCatNLOCmdShell(aMCatNLOCmd, cmd.CmdShell):
    """The command line processor of MadGraph.

    Interactive (terminal) flavour of aMCatNLOCmd: combines the aMC@NLO run
    logic with the readline-based CmdShell front-end.
    """
#-------------------------------------------------------------------------------
# Usage messages and option parsers for the interface commands
# (module-level: shared by the do_* command handlers of aMCatNLOCmd)
#-------------------------------------------------------------------------------

_compile_usage = "compile [MODE] [options]\n" + \
                "-- compiles aMC@NLO \n" + \
                " MODE can be either FO, for fixed-order computations, \n" + \
                " or MC for matching with parton-shower monte-carlos. \n" + \
                " (if omitted, it is set to MC)\n"
_compile_parser = misc.OptionParser(usage=_compile_usage)
_compile_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the launch, without editing them")

_launch_usage = "launch [MODE] [options]\n" + \
                "-- execute aMC@NLO \n" + \
                " MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
                " If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
                " computation of the total cross section and the filling of parton-level histograms \n" + \
                " specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
                " If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
                " event file is generated which will be showered with the MonteCarlo specified \n" + \
                " in the run_card.dat\n"

_launch_parser = misc.OptionParser(usage=_launch_usage)
_launch_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the launch, without editing them")
_launch_parser.add_option("-c", "--cluster", default=False, action='store_true',
                            help="Submit the jobs on the cluster")
_launch_parser.add_option("-m", "--multicore", default=False, action='store_true',
                            help="Submit the jobs on multicore mode")
_launch_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                            help="Skip compilation. Ignored if no executable is found")
_launch_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                            help="Skip integration and event generation, just run reweight on the" + \
                                 " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_launch_parser.add_option("-p", "--parton", default=False, action='store_true',
                            help="Stop the run after the parton level file generation (you need " + \
                                 "to shower the file in order to get physical results)")
_launch_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                            help="Skip grid set up, just generate events starting from " + \
                                 "the last available results")
_launch_parser.add_option("-n", "--name", default=False, dest='run_name',
                            help="Provide a name to the run")
_launch_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                            help="For use with APPLgrid only: start from existing grids")
_launch_parser.add_option("-R", "--reweight", default=False, dest='do_reweight', action='store_true',
                            help="Run the reweight module (reweighting by different model parameters)")
_launch_parser.add_option("-M", "--madspin", default=False, dest='do_madspin', action='store_true',
                            help="Run the madspin package")


# generate_events shares the launch semantics but offers fewer options
_generate_events_usage = "generate_events [MODE] [options]\n" + \
                "-- execute aMC@NLO \n" + \
                " MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
                " If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
                " computation of the total cross section and the filling of parton-level histograms \n" + \
                " specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
                " If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
                " event file is generated which will be showered with the MonteCarlo specified \n" + \
                " in the run_card.dat\n"

_generate_events_parser = misc.OptionParser(usage=_generate_events_usage)
_generate_events_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the generate_events, without editing them")
_generate_events_parser.add_option("-c", "--cluster", default=False, action='store_true',
                            help="Submit the jobs on the cluster")
_generate_events_parser.add_option("-m", "--multicore", default=False, action='store_true',
                            help="Submit the jobs on multicore mode")
_generate_events_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                            help="Skip compilation. Ignored if no executable is found")
_generate_events_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                            help="Skip integration and event generation, just run reweight on the" + \
                                 " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_generate_events_parser.add_option("-p", "--parton", default=False, action='store_true',
                            help="Stop the run after the parton level file generation (you need " + \
                                 "to shower the file in order to get physical results)")
_generate_events_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                            help="Skip grid set up, just generate events starting from " + \
                                 "the last available results")
_generate_events_parser.add_option("-n", "--name", default=False, dest='run_name',
                            help="Provide a name to the run")


_calculate_xsect_usage = "calculate_xsect [ORDER] [options]\n" + \
                "-- calculate cross section up to ORDER.\n" + \
                " ORDER can be either LO or NLO (if omitted, it is set to NLO). \n"

_calculate_xsect_parser = misc.OptionParser(usage=_calculate_xsect_usage)
_calculate_xsect_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the launch, without editing them")
_calculate_xsect_parser.add_option("-c", "--cluster", default=False, action='store_true',
                            help="Submit the jobs on the cluster")
_calculate_xsect_parser.add_option("-m", "--multicore", default=False, action='store_true',
                            help="Submit the jobs on multicore mode")
_calculate_xsect_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                            help="Skip compilation. Ignored if no executable is found")
_calculate_xsect_parser.add_option("-n", "--name", default=False, dest='run_name',
                            help="Provide a name to the run")
_calculate_xsect_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                            help="For use with APPLgrid only: start from existing grids")
_calculate_xsect_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                            help="Skip grid set up, just generate events starting from " + \
                                 "the last available results")

_shower_usage = 'shower run_name [options]\n' + \
                '-- do shower/hadronization on parton-level file generated for run run_name\n' + \
                ' all the information (e.g. number of events, MonteCarlo, ...\n' + \
                ' are directly read from the header of the event file\n'
_shower_parser = misc.OptionParser(usage=_shower_usage)
_shower_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the shower_card present in the directory for the launch, without editing")

if '__main__' == __name__:
    # Launch the interface without any check if one code is already running.
    # This can ONLY run a single command !!
    import sys
    # NOTE: Python 2 only (print statements and 'except X, e' syntax below)
    if not sys.version_info[0] == 2 or sys.version_info[1] < 6:
        sys.exit('MadGraph/MadEvent 5 works only with python 2.6 or later (but not python 3.X).\n'+\
                 'Please upgrate your version of python.')

    import os
    import optparse
    # Get the directory of the script real path (bin)
    # and add it to the current PYTHONPATH
    root_path = os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))
    sys.path.insert(0, root_path)

    class MyOptParser(optparse.OptionParser):
        # OptionParser subclass that raises instead of exiting on bad options,
        # so the retry loop below can strip trailing command words
        class InvalidOption(Exception): pass
        def error(self, msg=''):
            raise MyOptParser.InvalidOption(msg)
    # Write out nice usage message if called with -h or --help
    usage = "usage: %prog [options] [FILE] "
    parser = MyOptParser(usage=usage)
    parser.add_option("-l", "--logging", default='INFO',
                      help="logging level (DEBUG|INFO|WARNING|ERROR|CRITICAL) [%default]")
    parser.add_option("","--web", action="store_true", default=False, dest='web', \
                      help='force toce to be in secure mode')
    parser.add_option("","--debug", action="store_true", default=False, dest='debug', \
                      help='force to launch debug mode')
    parser_error = ''
    done = False

    # Try progressively shorter argv prefixes until the options parse; the
    # stripped tail is re-appended as command arguments.
    # NOTE(review): there is no break once a parse succeeds, so shorter
    # prefixes keep being tried and can rebind 'options'/'args' -- confirm
    # this is the intended behaviour.
    for i in range(len(sys.argv)-1):
        try:
            (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i])
            done = True
        except MyOptParser.InvalidOption, error:
            pass
        else:
            args += sys.argv[len(sys.argv)-i:]
    if not done:
        # raise correct error:
        try:
            (options, args) = parser.parse_args()
        except MyOptParser.InvalidOption, error:
            print error
            sys.exit(2)

    if len(args) == 0:
        args = ''

    import subprocess
    import logging
    import logging.config
    # Set logging level according to the logging level given by options
    #logging.basicConfig(level=vars(logging)[options.logging])
    import internal.coloring_logging
    try:
        # debug builds default to DEBUG verbosity
        if __debug__ and options.logging == 'INFO':
            options.logging = 'DEBUG'
        if options.logging.isdigit():
            level = int(options.logging)
        else:
            level = eval('logging.' + options.logging)
        print os.path.join(root_path, 'internal', 'me5_logging.conf')
        logging.config.fileConfig(os.path.join(root_path, 'internal', 'me5_logging.conf'))
        logging.root.setLevel(level)
        logging.getLogger('madgraph').setLevel(level)
    except:
        # NOTE(review): the bare 'raise' makes the following 'pass'
        # unreachable; any logging-setup failure aborts the script
        raise
        pass

    # Call the cmd interface main loop
    try:
        if args:
            # a single command is provided
            if '--web' in args:
                # web/secure mode: plain command processor, no readline shell
                i = args.index('--web')
                args.pop(i)
                cmd_line = aMCatNLOCmd(me_dir=os.path.dirname(root_path),force_run=True)
            else:
                cmd_line = aMCatNLOCmdShell(me_dir=os.path.dirname(root_path),force_run=True)

            if not hasattr(cmd_line, 'do_%s' % args[0]):
                if parser_error:
                    print parser_error
                    print 'and %s can not be interpreted as a valid command.' % args[0]
                else:
                    print 'ERROR: %s not a valid command. Please retry' % args[0]
            else:
                cmd_line.use_rawinput = False
                cmd_line.run_cmd(' '.join(args))
                cmd_line.run_cmd('quit')

    except KeyboardInterrupt:
        print 'quit on KeyboardInterrupt'
        pass