15 """Several different checks for processes (and hence models):
16 permutation tests, gauge invariance tests, lorentz invariance
17 tests. Also class for evaluation of Python matrix elements,
18 MatrixElementEvaluator."""
19
20 from __future__ import division
21
22 import array
23 import copy
24 import fractions
25 import itertools
26 import logging
27 import math
28 import os
29 import sys
30 import re
31 import shutil
32 import random
33 import glob
34 import re
35 import subprocess
36 import time
37 import datetime
38 import errno
39 import pickle
40
41
42
43 import aloha
44 import aloha.aloha_writers as aloha_writers
45 import aloha.create_aloha as create_aloha
46
47 import madgraph.iolibs.export_python as export_python
48 import madgraph.iolibs.helas_call_writers as helas_call_writers
49 import models.import_ufo as import_ufo
50 import madgraph.iolibs.save_load_object as save_load_object
51 import madgraph.iolibs.file_writers as writers
52
53 import madgraph.core.base_objects as base_objects
54 import madgraph.core.color_algebra as color
55 import madgraph.core.color_amp as color_amp
56 import madgraph.core.helas_objects as helas_objects
57 import madgraph.core.diagram_generation as diagram_generation
58
59 import madgraph.various.rambo as rambo
60 import madgraph.various.misc as misc
61 import madgraph.various.progressbar as pbar
62 import madgraph.various.banner as bannermod
63 import madgraph.various.progressbar as pbar
64
65 import madgraph.loop.loop_diagram_generation as loop_diagram_generation
66 import madgraph.loop.loop_helas_objects as loop_helas_objects
67 import madgraph.loop.loop_base_objects as loop_base_objects
68 import models.check_param_card as check_param_card
69
70 from madgraph.interface.madevent_interface import MadLoopInitializer
71 from madgraph.interface.common_run_interface import AskforEditCard
72 from madgraph import MG5DIR, InvalidCmd, MadGraph5Error
73
74 from madgraph.iolibs.files import cp
75
76 import StringIO
77 import models.model_reader as model_reader
78 import aloha.template_files.wavefunctions as wavefunctions
79 from aloha.template_files.wavefunctions import \
80 ixxxxx, oxxxxx, vxxxxx, sxxxxx, txxxxx, irxxxx, orxxxx
81
82 ADDED_GLOBAL = []
83
84 temp_dir_prefix = "TMP_CHECK"
85
86 pjoin = os.path.join
def clean_added_globals(to_clean):
    """Remove from the global namespace the names registered in to_clean."""
    for value in list(to_clean):
        del globals()[value]
        to_clean.remove(value)

97 """ Just an 'option container' to mimick the interface which is passed to the
98 tests. We put in only what is now used from interface by the test:
99 cmd.options['fortran_compiler']
100 cmd.options['complex_mass_scheme']
101 cmd._mgme_dir"""
102 - def __init__(self, mgme_dir = "", complex_mass_scheme = False,
103 fortran_compiler = 'gfortran' ):
104 self._mgme_dir = mgme_dir
105 self.options = {}
106 self.options['complex_mass_scheme']=complex_mass_scheme
107 self.options['fortran_compiler']=fortran_compiler
108
109
110
111
112
113 logger = logging.getLogger('madgraph.various.process_checks')
118 """boost the set momenta in the 'boost direction' by the 'beta'
119 factor"""
120
121 boost_p = []
122 gamma = 1/ math.sqrt(1 - beta**2)
123 for imp in p:
124 bosst_p = imp[boost_direction]
125 E, px, py, pz = imp
126 boost_imp = []
127
128 boost_imp.append(gamma * E - gamma * beta * bosst_p)
129
130 if boost_direction == 1:
131 boost_imp.append(-gamma * beta * E + gamma * px)
132 else:
133 boost_imp.append(px)
134
135 if boost_direction == 2:
136 boost_imp.append(-gamma * beta * E + gamma * py)
137 else:
138 boost_imp.append(py)
139
140 if boost_direction == 3:
141 boost_imp.append(-gamma * beta * E + gamma * pz)
142 else:
143 boost_imp.append(pz)
144
145 boost_p.append(boost_imp)
146
147 return boost_p
148
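# Illustrative sketch (not part of the original module): how boost_momenta can
# be exercised on a simple back-to-back configuration. The momenta below are
# made-up numbers chosen only to show the calling convention; this helper is
# never called by the module itself.
def _example_boost_momenta():
    p_in = [[500.0, 0.0, 0.0, 500.0],
            [500.0, 0.0, 0.0, -500.0]]
    # Boost along the z axis (direction 3) with beta=0.5; each four-momentum
    # is transformed while invariant masses are preserved.
    return boost_momenta(p_in, 3, 0.5)
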
153 """Class taking care of matrix element evaluation, storing
154 relevant quantities for speedup."""
155
156 - def __init__(self, model , param_card = None,
157 auth_skipping = False, reuse = True, cmd = FakeInterface()):
158 """Initialize object with stored_quantities, helas_writer,
159 model, etc.
160 auth_skipping = True means that any identical matrix element will be
161 evaluated only once
162 reuse = True means that the matrix element corresponding to a
163 given process can be reused (turn off if you are using
164 different models for the same process)"""
165
166 self.cmd = cmd
167
168
169 self.helas_writer = helas_call_writers.PythonUFOHelasCallWriter(model)
170
171
172 self.full_model = model_reader.ModelReader(model)
173 try:
174 self.full_model.set_parameters_and_couplings(param_card)
175 except MadGraph5Error:
176 if isinstance(param_card, (str,file)):
177 raise
178 logger.warning('param_card present in the event file not compatible.'+
179 ' We will use the default one.')
180 self.full_model.set_parameters_and_couplings()
181
182 self.auth_skipping = auth_skipping
183 self.reuse = reuse
184 self.cmass_scheme = cmd.options['complex_mass_scheme']
185 self.store_aloha = []
186 self.stored_quantities = {}
187
188
189
190
    def evaluate_matrix_element(self, matrix_element, p=None, full_model=None,
                                gauge_check=False, auth_skipping=None,
                                output='m2', options=None):
        """Calculate the matrix element and evaluate it for a phase space
        point. output is either 'm2', 'amp' or 'jamp'.
        """
197
198 if full_model:
199 self.full_model = full_model
200 process = matrix_element.get('processes')[0]
201 model = process.get('model')
202
203 if "matrix_elements" not in self.stored_quantities:
204 self.stored_quantities['matrix_elements'] = []
205 matrix_methods = {}
206
207 if self.reuse and "Matrix_%s" % process.shell_string() in globals() and p:
208
209 matrix = eval("Matrix_%s()" % process.shell_string())
210 me_value = matrix.smatrix(p, self.full_model)
211 if output == "m2":
212 return matrix.smatrix(p, self.full_model), matrix.amp2
213 else:
214 m2 = matrix.smatrix(p, self.full_model)
215 return {'m2': m2, output:getattr(matrix, output)}
216 if (auth_skipping or self.auth_skipping) and matrix_element in \
217 self.stored_quantities['matrix_elements']:
218
219 logger.info("Skipping %s, " % process.nice_string() + \
220 "identical matrix element already tested" \
221 )
222 return None
223
224 self.stored_quantities['matrix_elements'].append(matrix_element)
225
226
227
228
229 if "list_colorize" not in self.stored_quantities:
230 self.stored_quantities["list_colorize"] = []
231 if "list_color_basis" not in self.stored_quantities:
232 self.stored_quantities["list_color_basis"] = []
233 if "list_color_matrices" not in self.stored_quantities:
234 self.stored_quantities["list_color_matrices"] = []
235
236 col_basis = color_amp.ColorBasis()
237 new_amp = matrix_element.get_base_amplitude()
238 matrix_element.set('base_amplitude', new_amp)
239 colorize_obj = col_basis.create_color_dict_list(new_amp)
240
241 try:
242
243
244
245 col_index = self.stored_quantities["list_colorize"].index(colorize_obj)
246 except ValueError:
247
248
249 self.stored_quantities['list_colorize'].append(colorize_obj)
250 col_basis.build()
251 self.stored_quantities['list_color_basis'].append(col_basis)
252 col_matrix = color_amp.ColorMatrix(col_basis)
253 self.stored_quantities['list_color_matrices'].append(col_matrix)
254 col_index = -1
255
256
257 matrix_element.set('color_basis',
258 self.stored_quantities['list_color_basis'][col_index])
259 matrix_element.set('color_matrix',
260 self.stored_quantities['list_color_matrices'][col_index])
261
262
263 if "used_lorentz" not in self.stored_quantities:
264 self.stored_quantities["used_lorentz"] = []
265
266 me_used_lorentz = set(matrix_element.get_used_lorentz())
267 me_used_lorentz = [lorentz for lorentz in me_used_lorentz \
268 if lorentz not in self.store_aloha]
269
270 aloha_model = create_aloha.AbstractALOHAModel(model.get('name'))
271 aloha_model.add_Lorentz_object(model.get('lorentz'))
272 aloha_model.compute_subset(me_used_lorentz)
273
274
275 aloha_routines = []
276 for routine in aloha_model.values():
277 aloha_routines.append(routine.write(output_dir = None,
278 mode='mg5',
279 language = 'Python'))
280 for routine in aloha_model.external_routines:
281 aloha_routines.append(
282 open(aloha_model.locate_external(routine, 'Python')).read())
283
284
285 previous_globals = list(globals().keys())
286 for routine in aloha_routines:
287 exec(routine, globals())
288 for key in globals().keys():
289 if key not in previous_globals:
290 ADDED_GLOBAL.append(key)
291
292
293 self.store_aloha.extend(me_used_lorentz)
294
295 exporter = export_python.ProcessExporterPython(matrix_element,
296 self.helas_writer)
297 try:
298 matrix_methods = exporter.get_python_matrix_methods(\
299 gauge_check=gauge_check)
300
301 except helas_call_writers.HelasWriterError, error:
302 logger.info(error)
303 return None
304
305
306
307 if self.reuse:
308
309 exec(matrix_methods[process.shell_string()], globals())
310 ADDED_GLOBAL.append('Matrix_%s' % process.shell_string())
311 else:
312
313 exec(matrix_methods[process.shell_string()])
314
315 if not p:
316 p, w_rambo = self.get_momenta(process, options)
317
318 exec("data = Matrix_%s()" % process.shell_string())
319 if output == "m2":
320 return data.smatrix(p, self.full_model), data.amp2
321 else:
322 m2 = data.smatrix(p,self.full_model)
323 return {'m2': m2, output:getattr(data, output)}
324
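    # Illustrative sketch (not part of the original class): a typical call
    # sequence, assuming 'model' is a loaded UFO model and 'me' is a
    # HelasMatrixElement built elsewhere (both names are placeholders).
    #
    #   evaluator = MatrixElementEvaluator(model, param_card=None,
    #                                      auth_skipping=False, reuse=True)
    #   m2, amp2 = evaluator.evaluate_matrix_element(me, output='m2')
    #   # with output='jamp' one instead gets {'m2': ..., 'jamp': [...]}
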
    @staticmethod
    def pass_isolation_cuts(pmoms, ptcut=50.0, drcut=0.5):
        """ Check whether the specified kinematic point passes isolation cuts
        """
329
330 def Pt(pmom):
331 """ Computes the pt of a 4-momentum"""
332 return math.sqrt(pmom[1]**2+pmom[2]**2)
333
334 def DeltaR(p1,p2):
335 """ Computes the DeltaR between two 4-momenta"""
336
337 p1_vec=math.sqrt(p1[1]**2+p1[2]**2+p1[3]**2)
338 p2_vec=math.sqrt(p2[1]**2+p2[2]**2+p2[3]**2)
339 eta1=0.5*math.log((p1_vec+p1[3])/(p1_vec-p1[3]))
340 eta2=0.5*math.log((p2_vec+p2[3])/(p2_vec-p2[3]))
341
342 phi1=math.atan2(p1[2],p1[1])
343 phi2=math.atan2(p2[2],p2[1])
344 dphi=abs(phi2-phi1)
345
346 dphi=abs(abs(dphi-math.pi)-math.pi)
347
348 return math.sqrt(dphi**2+(eta2-eta1)**2)
349
350 for i, pmom in enumerate(pmoms[2:]):
351
352 if Pt(pmom)<ptcut:
353 return False
354
355 for pmom2 in pmoms[3+i:]:
356 if DeltaR(pmom,pmom2)<drcut:
357 return False
358 return True
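
    # Illustrative sketch (not part of the original class): the isolation test
    # is applied to the full list of external momenta (incoming legs first),
    # for instance
    #
    #   p, wgt = evaluator.get_momenta(process, options)
    #   if MatrixElementEvaluator.pass_isolation_cuts(p):
    #       ...  # keep this phase-space point
    #
    # A final-state momentum failing the pt or DeltaR requirement makes the
    # function return False, as exploited by the stability checks below.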
359
360
361
362
    def get_momenta(self, process, options=None, special_mass=None):
        """Get a point in phase space for the external states in the given
        process, with the CM energy given. The incoming particles are
        assumed to be oriented along the z axis, with particle 1 along the
        positive z axis.
        For the CMS check, one must be able to choose the mass of the special
        resonance particle with id = -1, and the special_mass option allows
        one to specify it."""
371
372 if not options:
373 energy=1000
374 events=None
375 else:
376 energy = options['energy']
377 events = options['events']
378 to_skip = 0
379
380 if not (isinstance(process, base_objects.Process) and \
381 isinstance(energy, (float,int))):
382 raise rambo.RAMBOError, "Not correct type for arguments to get_momenta"
383
384
385 sorted_legs = sorted(process.get('legs'), lambda l1, l2:\
386 l1.get('number') - l2.get('number'))
387
388
389 if events:
390 ids = [l.get('id') for l in sorted_legs]
391 import MadSpin.decay as madspin
392 if not hasattr(self, 'event_file'):
393 fsock = open(events)
394 self.event_file = madspin.Event(fsock)
395
396 skip = 0
397 while self.event_file.get_next_event() != 'no_event':
398 event = self.event_file.particle
399
400 event_ids = [p['pid'] for p in event.values()]
401 if event_ids == ids:
402 skip += 1
403 if skip > to_skip:
404 break
405 else:
406 raise MadGraph5Error, 'No compatible events for %s' % ids
407 p = []
408 for part in event.values():
409 m = part['momentum']
410 p.append([m.E, m.px, m.py, m.pz])
411 return p, 1
412
413 nincoming = len([leg for leg in sorted_legs if leg.get('state') == False])
414 nfinal = len(sorted_legs) - nincoming
415
416
417 mass = []
418 for l in sorted_legs:
419 if l.get('id') != 0:
420 mass_string = self.full_model.get_particle(l.get('id')).get('mass')
421 mass.append(self.full_model.get('parameter_dict')[mass_string].real)
422 else:
423 if isinstance(special_mass, float):
424 mass.append(special_mass)
425 else:
426 raise Exception, "A 'special_mass' option must be specified"+\
427 " in get_momenta when a leg with id=-10 is present (for CMS check)"
428
429
430
431
432
433
434
435
436
437 energy = max(energy, sum(mass[:nincoming])*1.2,sum(mass[nincoming:])*1.2)
438
439
440
441
442
443
444
445 if nfinal == 1:
446 p = []
447 energy = mass[-1]
448 p.append([energy/2,0,0,energy/2])
449 p.append([energy/2,0,0,-energy/2])
450 p.append([mass[-1],0,0,0])
451 return p, 1.0
452
453 e2 = energy**2
454 m1 = mass[0]
455 p = []
456
457 masses = rambo.FortranList(nfinal)
458 for i in range(nfinal):
459 masses[i+1] = mass[nincoming + i]
460
461 if nincoming == 1:
462
463 p.append([abs(m1), 0., 0., 0.])
464 p_rambo, w_rambo = rambo.RAMBO(nfinal, abs(m1), masses)
465
466 for i in range(1, nfinal+1):
467 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
468 p_rambo[(2,i)], p_rambo[(3,i)]]
469 p.append(momi)
470
471 return p, w_rambo
472
473 if nincoming != 2:
474 raise rambo.RAMBOError('Need 1 or 2 incoming particles')
475
476 if nfinal == 1:
477 energy = masses[1]
478 if masses[1] == 0.0:
479 raise rambo.RAMBOError('The kinematic 2 > 1 with the final'+\
480 ' state particle massless is invalid')
481
482 e2 = energy**2
483 m2 = mass[1]
484
485 mom = math.sqrt((e2**2 - 2*e2*m1**2 + m1**4 - 2*e2*m2**2 - \
486 2*m1**2*m2**2 + m2**4) / (4*e2))
487 e1 = math.sqrt(mom**2+m1**2)
488 e2 = math.sqrt(mom**2+m2**2)
489
490 p.append([e1, 0., 0., mom])
491 p.append([e2, 0., 0., -mom])
492
493 if nfinal == 1:
494 p.append([energy, 0., 0., 0.])
495 return p, 1.
496
497 p_rambo, w_rambo = rambo.RAMBO(nfinal, energy, masses)
498
499
500 for i in range(1, nfinal+1):
501 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
502 p_rambo[(2,i)], p_rambo[(3,i)]]
503 p.append(momi)
504
505 return p, w_rambo
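
    # Illustrative sketch (not part of the original class): get_momenta returns
    # a list of [E, px, py, pz] four-momenta (incoming legs first) together
    # with the RAMBO phase-space weight, e.g.
    #
    #   p, w_rambo = evaluator.get_momenta(process,
    #                                      options={'energy': 1000,
    #                                               'events': None})
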
class LoopMatrixElementEvaluator(MatrixElementEvaluator):
    """Class taking care of matrix element evaluation for loop processes."""

    def __init__(self, cuttools_dir=None, output_path=None, tir_dir={},
                 cmd=FakeInterface(), *args, **kwargs):
        """Allow for initializing the MG5 root where the temporary fortran
        output for checks is placed."""
518
519 super(LoopMatrixElementEvaluator,self).__init__(*args,cmd=cmd,**kwargs)
520
521 self.mg_root=self.cmd._mgme_dir
522
523 if output_path is None:
524 self.output_path = self.cmd._mgme_dir
525 else:
526 self.output_path = output_path
527
528 self.cuttools_dir=cuttools_dir
529 self.tir_dir=tir_dir
530 self.loop_optimized_output = cmd.options['loop_optimized_output']
531
532
533 self.proliferate=True
534
535
536
537
    def evaluate_matrix_element(self, matrix_element, p=None, options=None,
                                gauge_check=False, auth_skipping=None,
                                output='m2', PS_name=None, MLOptions={}):
        """Calculate the matrix element and evaluate it for a phase space
        point. Output can only be 'm2'. The 'jamp' and 'amp' returned values
        are just empty lists at this point.
        If PS_name is not None the written out PS.input will be saved in
        the file PS.input_<PS_name> as well."""
546
547 process = matrix_element.get('processes')[0]
548 model = process.get('model')
549
550 if options and 'split_orders' in options.keys():
551 split_orders = options['split_orders']
552 else:
553 split_orders = -1
554
555 if "loop_matrix_elements" not in self.stored_quantities:
556 self.stored_quantities['loop_matrix_elements'] = []
557
558 if (auth_skipping or self.auth_skipping) and matrix_element in \
559 [el[0] for el in self.stored_quantities['loop_matrix_elements']]:
560
561 logger.info("Skipping %s, " % process.nice_string() + \
562 "identical matrix element already tested" )
563 return None
564
565
566 if not p:
567 p, w_rambo = self.get_momenta(process, options=options)
568
569 if matrix_element in [el[0] for el in \
570 self.stored_quantities['loop_matrix_elements']]:
571 export_dir=self.stored_quantities['loop_matrix_elements'][\
572 [el[0] for el in self.stored_quantities['loop_matrix_elements']\
573 ].index(matrix_element)][1]
574 logger.debug("Reusing generated output %s"%str(export_dir))
575 else:
576 export_dir=pjoin(self.output_path,temp_dir_prefix)
577 if os.path.isdir(export_dir):
578 if not self.proliferate:
                    raise InvalidCmd("The directory %s already exists. Please remove it."%str(export_dir))
580 else:
581 id=1
582 while os.path.isdir(pjoin(self.output_path,\
583 '%s_%i'%(temp_dir_prefix,id))):
584 id+=1
585 export_dir=pjoin(self.output_path,'%s_%i'%(temp_dir_prefix,id))
586
587 if self.proliferate:
588 self.stored_quantities['loop_matrix_elements'].append(\
589 (matrix_element,export_dir))
590
591
592
593 import madgraph.loop.loop_exporters as loop_exporters
594 if self.loop_optimized_output:
595 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
596 else:
597 exporter_class=loop_exporters.LoopProcessExporterFortranSA
598
599 MLoptions = {'clean': True,
600 'complex_mass': self.cmass_scheme,
601 'export_format':'madloop',
602 'mp':True,
603 'SubProc_prefix':'P',
604 'compute_color_flows': not process.get('has_born'),
605 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
606 'cuttools_dir': self.cuttools_dir,
607 'fortran_compiler': self.cmd.options['fortran_compiler'],
608 'output_dependencies': self.cmd.options['output_dependencies']}
609
610 MLoptions.update(self.tir_dir)
611
612 FortranExporter = exporter_class(\
613 self.mg_root, export_dir, MLoptions)
614 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
615 FortranExporter.copy_v4template(modelname=model.get('name'))
616 FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
617 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
618 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
619 for c in l]))
620 FortranExporter.convert_model_to_mg4(model,wanted_lorentz,wanted_couplings)
621 FortranExporter.finalize_v4_directory(None,"",False,False,compiler=
622 {'fortran':self.cmd.options['fortran_compiler'],
623 'f2py':self.cmd.options['fortran_compiler'],
624 'cpp':self.cmd.options['fortran_compiler']})
625
626 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
627 split_orders=split_orders)
628
629 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
630 mp = gauge_check and self.loop_optimized_output, MLOptions=MLOptions)
631
632 if gauge_check:
633 file_path, orig_file_content, new_file_content = \
634 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
635 ['helas_calls_ampb_1.f','loop_matrix.f'])
636 file = open(file_path,'w')
637 file.write(new_file_content)
638 file.close()
639 if self.loop_optimized_output:
640 mp_file_path, mp_orig_file_content, mp_new_file_content = \
641 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
642 ['mp_helas_calls_ampb_1.f','mp_compute_loop_coefs.f'],mp=True)
643 mp_file = open(mp_file_path,'w')
644 mp_file.write(mp_new_file_content)
645 mp_file.close()
646
647
648 finite_m2 = self.get_me_value(process.shell_string_v4(), 0,\
649 export_dir, p, PS_name = PS_name, verbose=False)[0][0]
650
651
652 if gauge_check:
653 file = open(file_path,'w')
654 file.write(orig_file_content)
655 file.close()
656 if self.loop_optimized_output:
657 mp_file = open(mp_file_path,'w')
658 mp_file.write(mp_orig_file_content)
659 mp_file.close()
660
661
662 if not self.proliferate:
663 shutil.rmtree(export_dir)
664
665 if output == "m2":
666
667
668 return finite_m2, []
669 else:
670 return {'m2': finite_m2, output:[]}
671
    def fix_MadLoopParamCard(self, dir_name, mp=False, loop_filter=False,
                             DoubleCheckHelicityFilter=False, MLOptions={}):
        """ Set parameters in MadLoopParams.dat suited for these checks. MP
        stands for multiple precision and can either be a bool or an integer
        to specify the mode."""
677
678
679 file = open(pjoin(dir_name,'MadLoopParams.dat'), 'r')
680 MLCard = bannermod.MadLoopParam(file)
681
682 if isinstance(mp,bool):
683 mode = 4 if mp else 1
684 else:
685 mode = mp
686
687 for key, value in MLOptions.items():
688 if key == "MLReductionLib":
689 if isinstance(value, int):
690 ml_reds = str(value)
691 if isinstance(value,list):
692 if len(value)==0:
693 ml_reds = '1'
694 else:
695 ml_reds="|".join([str(vl) for vl in value])
696 elif isinstance(value, str):
697 ml_reds = value
698 elif isinstance(value, int):
699 ml_reds = str(value)
700 else:
701 raise MadGraph5Error, 'The argument %s '%str(value)+\
702 ' in fix_MadLoopParamCard must be a string, integer'+\
703 ' or a list.'
704 MLCard.set("MLReductionLib",ml_reds)
705 elif key == 'ImprovePS':
706 MLCard.set('ImprovePSPoint',2 if value else -1)
707 elif key == 'ForceMP':
708 mode = 4
709 elif key in MLCard:
710 MLCard.set(key,value)
711 else:
                raise Exception, 'The MadLoop option %s specified in function'%key+\
                      ' fix_MadLoopParamCard does not correspond to an option'+\
                      ' defined in MadLoop, nor is it specially handled in this function.'
715 if not mode is None:
716 MLCard.set('CTModeRun',mode)
717 MLCard.set('CTModeInit',mode)
718 MLCard.set('UseLoopFilter',loop_filter)
719 MLCard.set('DoubleCheckHelicityFilter',DoubleCheckHelicityFilter)
720
721 MLCard.write(pjoin(dir_name,os.pardir,'SubProcesses','MadLoopParams.dat'))
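
    # Illustrative sketch (not part of the original class): the MLOptions
    # dictionary recognised above can be passed as follows (values are
    # examples only).
    #
    #   evaluator.fix_MadLoopParamCard(pjoin(export_dir, 'Cards'),
    #                                  mp=False,
    #                                  MLOptions={'MLReductionLib': [1, 6],
    #                                             'ImprovePS': True})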
722
    @classmethod
    def get_me_value(cls, proc, proc_id, working_dir, PSpoint=[], PS_name=None,
                     verbose=True, format='tuple', skip_compilation=False):
        """Compile and run ./check, then parse the output and return the result
        for the process with id = proc_id and the given PSpoint if specified.
        If PS_name is not None the written out PS.input will be saved in
        the file PS.input_<PS_name> as well."""
730 if verbose:
731 sys.stdout.write('.')
732 sys.stdout.flush()
733
734 shell_name = None
735 directories = misc.glob('P%i_*' % proc_id, pjoin(working_dir, 'SubProcesses'))
736 if directories and os.path.isdir(directories[0]):
737 shell_name = os.path.basename(directories[0])
738
739
740 if not shell_name:
741 logging.info("Directory hasn't been created for process %s" %proc)
742 return ((0.0, 0.0, 0.0, 0.0, 0), [])
743
744 if verbose: logging.debug("Working on process %s in dir %s" % (proc, shell_name))
745
746 dir_name = pjoin(working_dir, 'SubProcesses', shell_name)
747 if not skip_compilation:
748
749 if os.path.isfile(pjoin(dir_name,'check')):
750 os.remove(pjoin(dir_name,'check'))
751 try:
752 os.remove(pjoin(dir_name,'check_sa.o'))
753 os.remove(pjoin(dir_name,'loop_matrix.o'))
754 except OSError:
755 pass
756
757 devnull = open(os.devnull, 'w')
758 retcode = subprocess.call(['make','check'],
759 cwd=dir_name, stdout=devnull, stderr=devnull)
760 devnull.close()
761
762 if retcode != 0:
763 logging.info("Error while executing make in %s" % shell_name)
764 return ((0.0, 0.0, 0.0, 0.0, 0), [])
765
766
767 if PSpoint:
768 misc.write_PS_input(pjoin(dir_name, 'PS.input'),PSpoint)
769
770
771 if not PS_name is None:
772 misc.write_PS_input(pjoin(dir_name, \
773 'PS.input_%s'%PS_name),PSpoint)
774
775 try:
776 output = subprocess.Popen('./check',
777 cwd=dir_name,
778 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
779 output.read()
780 output.close()
781 if os.path.exists(pjoin(dir_name,'result.dat')):
782 return cls.parse_check_output(file(pjoin(dir_name,\
783 'result.dat')),format=format)
784 else:
785 logging.warning("Error while looking for file %s"%str(os.path\
786 .join(dir_name,'result.dat')))
787 return ((0.0, 0.0, 0.0, 0.0, 0), [])
788 except IOError:
789 logging.warning("Error while executing ./check in %s" % shell_name)
790 return ((0.0, 0.0, 0.0, 0.0, 0), [])
791
    @classmethod
    def parse_check_output(cls, output, format='tuple'):
        """Parse the output and return a pair whose first element collects the
        finite, born, single and double pole of the ME together with the GeV
        exponent, and whose second element is a list of 4-momenta for all
        particles involved. The answer is returned in one of two possible
        formats, 'tuple' or 'dict'."""
798
799 res_dict = {'res_p':[],
800 'born':0.0,
801 'finite':0.0,
802 '1eps':0.0,
803 '2eps':0.0,
804 'gev_pow':0,
805 'export_format':'Default',
806 'accuracy':0.0,
807 'return_code':0,
808 'Split_Orders_Names':[],
809 'Loop_SO_Results':[],
810 'Born_SO_Results':[],
811 'Born_kept':[],
812 'Loop_kept':[]
813 }
814 res_p = []
815
816
817
818 if isinstance(output,file) or isinstance(output,list):
819 text=output
820 elif isinstance(output,str):
821 text=output.split('\n')
822 else:
823 raise MadGraph5Error, 'Type for argument output not supported in'+\
824 ' parse_check_output.'
825 for line in text:
826 splitline=line.split()
827 if len(splitline)==0:
828 continue
829 elif splitline[0]=='PS':
830 res_p.append([float(s) for s in splitline[1:]])
831 elif splitline[0]=='ASO2PI':
832 res_dict['alphaS_over_2pi']=float(splitline[1])
833 elif splitline[0]=='BORN':
834 res_dict['born']=float(splitline[1])
835 elif splitline[0]=='FIN':
836 res_dict['finite']=float(splitline[1])
837 elif splitline[0]=='1EPS':
838 res_dict['1eps']=float(splitline[1])
839 elif splitline[0]=='2EPS':
840 res_dict['2eps']=float(splitline[1])
841 elif splitline[0]=='EXP':
842 res_dict['gev_pow']=int(splitline[1])
843 elif splitline[0]=='Export_Format':
844 res_dict['export_format']=splitline[1]
845 elif splitline[0]=='ACC':
846 res_dict['accuracy']=float(splitline[1])
847 elif splitline[0]=='RETCODE':
848 res_dict['return_code']=int(splitline[1])
849 elif splitline[0]=='Split_Orders_Names':
850 res_dict['Split_Orders_Names']=splitline[1:]
851 elif splitline[0] in ['Born_kept', 'Loop_kept']:
852 res_dict[splitline[0]] = [kept=='T' for kept in splitline[1:]]
853 elif splitline[0] in ['Loop_SO_Results', 'Born_SO_Results']:
854
855
856
857
858 res_dict[splitline[0]].append(\
859 ([int(el) for el in splitline[1:]],{}))
860 elif splitline[0]=='SO_Loop':
861 res_dict['Loop_SO_Results'][-1][1][splitline[1]]=\
862 float(splitline[2])
863 elif splitline[0]=='SO_Born':
864 res_dict['Born_SO_Results'][-1][1][splitline[1]]=\
865 float(splitline[2])
866
867 res_dict['res_p'] = res_p
868
869 if format=='tuple':
870 return ((res_dict['finite'],res_dict['born'],res_dict['1eps'],
871 res_dict['2eps'],res_dict['gev_pow']), res_dict['res_p'])
872 else:
873 return res_dict
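
    # Illustrative sketch (not part of the original class): the parser accepts
    # a file object, a list of lines or a plain string using the tokens read
    # above, e.g.
    #
    #   out = "BORN 1.0\nFIN 0.5\n1EPS 0.0\n2EPS 0.0\nEXP -4"
    #   (finite, born, eps1, eps2, gev_pow), ps = \
    #                 LoopMatrixElementEvaluator.parse_check_output(out)
    #   full = LoopMatrixElementEvaluator.parse_check_output(out, format='dict')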
874
    @staticmethod
    def apply_log_tweak(proc_path, mode):
        """ Changes the file model_functions.f in the SOURCE of the process
        output so as to change how logarithms are analytically continued and
        see how it impacts the CMS check."""
        valid_modes = ['default', 'recompile']
        if not (mode in valid_modes or (isinstance(mode, list) and
                len(mode)==2 and all(m in ['logp','logm','log'] for m in mode))):
            raise MadGraph5Error("Mode '%s' not recognized"%mode+
                                 " in function apply_log_tweak.")
885
886 model_path = pjoin(proc_path,'Source','MODEL')
887 directories = misc.glob('P0_*', pjoin(proc_path,'SubProcesses'))
888 if directories and os.path.isdir(directories[0]):
889 exe_path = directories[0]
890 else:
            raise MadGraph5Error, 'Could not find a process executable '+\
                                  'directory in %s'%proc_path
893 bu_path = pjoin(model_path, 'model_functions.f__backUp__')
894
895 if mode=='default':
896
897 if not os.path.isfile(bu_path):
898 raise MadGraph5Error, 'Back up file %s could not be found.'%bu_path
899 shutil.move(bu_path, pjoin(model_path, 'model_functions.f'))
900 return
901
902 if mode=='recompile':
903 try:
904 os.remove(pjoin(model_path,'model_functions.o'))
905 os.remove(pjoin(proc_path,'lib','libmodel.a'))
906 except:
907 pass
908 misc.compile(cwd=model_path)
909
910 try:
911 os.remove(pjoin(exe_path,'check'))
912 except:
913 pass
914 misc.compile(arg=['check'], cwd=exe_path)
915 return
916
917 if mode[0]==mode[1]:
918 return
919
920
921 mp_prefix = 'MP_'
922 target_line = 'FUNCTION %%sREG%s(ARG)'%mode[0].lower()
923
924
925 if not os.path.isfile(bu_path):
926 shutil.copy(pjoin(model_path, 'model_functions.f'), bu_path)
927 model_functions = open(pjoin(model_path,'model_functions.f'),'r')
928
929 new_model_functions = []
930 has_replaced = False
931 just_replaced = False
932 find_one_replacement= False
933 mp_mode = None
934 suffix = {'log':'','logp':r'\s*\+\s*TWOPII','logm':r'\s*\-\s*TWOPII'}
935 replace_regex=r'^\s*%%sREG%s\s*=\s*LOG\(ARG\)%s'%(mode[0],suffix[mode[0]])
936 for line in model_functions:
937
938 if just_replaced:
939 if not re.match(r'\s{6}', line):
940 continue
941 else:
942 just_replaced = False
943 if mp_mode is None:
944
945 new_model_functions.append(line)
946 if (target_line%mp_prefix).lower() in line.lower():
947 mp_mode = mp_prefix
948 elif (target_line%'').lower() in line.lower():
949 mp_mode = ''
950 else:
951
952 if not has_replaced and re.match(replace_regex%mp_mode,line,
953 re.IGNORECASE):
954
955 if mode[0]=='log':
956 if mp_mode=='':
957 new_line =\
958 """ if(dble(arg).lt.0.0d0.and.dimag(arg).gt.0.0d0)then
959 reg%s=log(arg) %s TWOPII
960 else
961 reg%s=log(arg)
962 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
963 else:
964 new_line =\
965 """ if(real(arg,kind=16).lt.0.0e0_16.and.imagpart(arg).lt.0.0e0_16)then
966 mp_reg%s=log(arg) %s TWOPII
967 else
968 mp_reg%s=log(arg)
969 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
970 else:
971 new_line = ' '*6+"%sreg%s=log(arg) %s\n"%(mp_mode,mode[0],
972 ('' if mode[1]=='log' else ('+TWOPII' if mode[1]=='logp' else '-TWOPII')))
973 new_model_functions.append(new_line)
974 just_replaced = True
975 has_replaced = True
976 find_one_replacement = True
977 else:
978 new_model_functions.append(line)
979 if re.match(r'^\s*END\s*$',line,re.IGNORECASE):
980 mp_mode = None
981 has_replaced = False
982
983 if not find_one_replacement:
984 logger.warning('No replacement was found/performed for token '+
985 "'%s->%s'."%(mode[0],mode[1]))
986 else:
987 open(pjoin(model_path,'model_functions.f'),'w').\
988 write(''.join(new_model_functions))
989 return
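
    # Illustrative sketch (not part of the original class): valid 'mode'
    # arguments for apply_log_tweak, as handled above.
    #
    #   LoopMatrixElementEvaluator.apply_log_tweak(proc_path, ['log', 'logp'])
    #   LoopMatrixElementEvaluator.apply_log_tweak(proc_path, 'recompile')
    #   ...  # rerun the check with the modified analytic continuation
    #   LoopMatrixElementEvaluator.apply_log_tweak(proc_path, 'default')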
990
992 """ Modify loop_matrix.f so to have one external massless gauge boson
993 polarization vector turned into its momentum. It is not a pretty and
994 flexible solution but it works for this particular case."""
995
996 shell_name = None
997 directories = misc.glob('P0_*', working_dir)
998 if directories and os.path.isdir(directories[0]):
999 shell_name = os.path.basename(directories[0])
1000
1001 dir_name = pjoin(working_dir, shell_name)
1002
1003
1004 ind=0
1005 while ind<len(file_names) and not os.path.isfile(pjoin(dir_name,
1006 file_names[ind])):
1007 ind += 1
1008 if ind==len(file_names):
1009 raise Exception, "No helas calls output file found."
1010
1011 helas_file_name=pjoin(dir_name,file_names[ind])
1012 file = open(pjoin(dir_name,helas_file_name), 'r')
1013
1014 helas_calls_out=""
1015 original_file=""
1016 gaugeVectorRegExp=re.compile(\
1017 r"CALL (MP\_)?VXXXXX\(P\(0,(?P<p_id>\d+)\),((D)?CMPLX\()?ZERO((,KIND\=16)?\))?,"+
1018 r"NHEL\(\d+\),[\+\-]1\*IC\(\d+\),W\(1,(?P<wf_id>\d+(,H)?)\)\)")
1019 foundGauge=False
1020
1021 for line in file:
1022 helas_calls_out+=line
1023 original_file+=line
1024 if line.find("INCLUDE 'coupl.inc'") != -1 or \
1025 line.find("INCLUDE 'mp_coupl_same_name.inc'") !=-1:
1026 helas_calls_out+=" INTEGER WARDINT\n"
1027 if not foundGauge:
1028 res=gaugeVectorRegExp.search(line)
1029 if res!=None:
1030 foundGauge=True
1031 helas_calls_out+=" DO WARDINT=1,4\n"
1032 helas_calls_out+=" W(WARDINT+4,"+res.group('wf_id')+")="
1033 if not mp:
1034 helas_calls_out+=\
1035 "DCMPLX(P(WARDINT-1,"+res.group('p_id')+"),0.0D0)\n"
1036 else:
1037 helas_calls_out+="CMPLX(P(WARDINT-1,"+\
1038 res.group('p_id')+"),0.0E0_16,KIND=16)\n"
1039 helas_calls_out+=" ENDDO\n"
1040 file.close()
1041
1042 return pjoin(dir_name,helas_file_name), original_file, helas_calls_out
1043
1048 """Class taking care of matrix element evaluation and running timing for
1049 loop processes."""
1050
1054
    @classmethod
    def get_MadLoop_Params(cls, MLCardPath):
        """ Return a dictionary of the parameters of the MadLoopParamCard.
        The key is the name of the parameter and the value is the
        corresponding string read from the card."""
1060
1061 return bannermod.MadLoopParam(MLCardPath)
1062
1063
    @classmethod
    def set_MadLoop_Params(cls, MLCardPath, params):
        """ Set the parameters in MadLoopParamCard to the values specified in
        the dictionary params.
        The key is the name of the parameter and the value is the
        corresponding string to write in the card."""
1070
1071 MLcard = bannermod.MadLoopParam(MLCardPath)
1072 for key,value in params.items():
1073 MLcard.set(key, value, ifnotdefault=False)
1074
1075 MLcard.write(MLCardPath, commentdefault=True)
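
    # Illustrative sketch (not part of the original class): forcing a couple of
    # MadLoop parameters before a run (parameter values are examples only).
    #
    #   card = pjoin(export_dir, 'Cards', 'MadLoopParams.dat')
    #   LoopMatrixElementTimer.set_MadLoop_Params(card, {'CTModeRun': '1',
    #                                                    'UseLoopFilter': '.FALSE.'})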
1076
1078 """ Edit loop_matrix.f in order to skip the loop evaluation phase.
1079 Notice this only affects the double precision evaluation which is
1080 normally fine as we do not make the timing check on mp."""
1081
1082 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1083 loop_matrix = file.read()
1084 file.close()
1085
1086 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1087 loop_matrix = re.sub(r"SKIPLOOPEVAL=\S+\)","SKIPLOOPEVAL=%s)"%('.TRUE.'
1088 if skip else '.FALSE.'), loop_matrix)
1089 file.write(loop_matrix)
1090 file.close()
1091
1093 """ Edit loop_matrix.f in order to set the flag which stops the
1094 execution after booting the program (i.e. reading the color data)."""
1095
1096 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1097 loop_matrix = file.read()
1098 file.close()
1099
1100 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1101 loop_matrix = re.sub(r"BOOTANDSTOP=\S+\)","BOOTANDSTOP=%s)"%('.TRUE.'
1102 if bootandstop else '.FALSE.'), loop_matrix)
1103 file.write(loop_matrix)
1104 file.close()
1105
    def setup_process(self, matrix_element, export_dir, reusing=False,
                      param_card=None, MLOptions={}, clean=True):
        """ Output the matrix_element in argument and perform the
        initialization while providing some details about the output in the
        dictionary returned. Returns None if anything fails."""
1111
1112 infos={'Process_output': None,
1113 'HELAS_MODEL_compilation' : None,
1114 'dir_path' : None,
1115 'Initialization' : None,
1116 'Process_compilation' : None}
1117
1118 if not reusing and clean:
1119 if os.path.isdir(export_dir):
1120 clean_up(self.output_path)
1121 if os.path.isdir(export_dir):
                raise InvalidCmd(\
                    "The directory %s already exists. Please remove it."\
                    %str(export_dir))
1125 else:
1126 if not os.path.isdir(export_dir):
1127 raise InvalidCmd(\
1128 "Could not find the directory %s to reuse."%str(export_dir))
1129
1130
1131 if not reusing and clean:
1132 model = matrix_element['processes'][0].get('model')
1133
1134
1135 import madgraph.loop.loop_exporters as loop_exporters
1136 if self.loop_optimized_output:
1137 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
1138 else:
1139 exporter_class=loop_exporters.LoopProcessExporterFortranSA
1140
1141 MLoptions = {'clean': True,
1142 'complex_mass': self.cmass_scheme,
1143 'export_format':'madloop',
1144 'mp':True,
1145 'SubProc_prefix':'P',
1146 'compute_color_flows':not matrix_element['processes'][0].get('has_born'),
1147 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
1148 'cuttools_dir': self.cuttools_dir,
1149 'fortran_compiler':self.cmd.options['fortran_compiler'],
1150 'output_dependencies':self.cmd.options['output_dependencies']}
1151
1152 MLoptions.update(self.tir_dir)
1153
1154 start=time.time()
1155 FortranExporter = exporter_class(self.mg_root, export_dir, MLoptions)
1156 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
1157 FortranExporter.copy_v4template(modelname=model.get('name'))
1158 FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
1159 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
1160 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
1161 for c in l]))
1162 FortranExporter.convert_model_to_mg4(self.full_model,wanted_lorentz,wanted_couplings)
1163 infos['Process_output'] = time.time()-start
1164 start=time.time()
1165 FortranExporter.finalize_v4_directory(None,"",False,False,compiler=
1166 {'fortran':self.cmd.options['fortran_compiler'],
1167 'f2py':self.cmd.options['fortran_compiler'],
1168 'cpp':self.cmd.options['fortran_compiler']})
1169 infos['HELAS_MODEL_compilation'] = time.time()-start
1170
1171
1172 if param_card != None:
1173 if isinstance(param_card, str):
1174 cp(pjoin(param_card),\
1175 pjoin(export_dir,'Cards','param_card.dat'))
1176 else:
1177 param_card.write(pjoin(export_dir,'Cards','param_card.dat'))
1178
1179
1180
1181 MadLoopInitializer.fix_PSPoint_in_check(
1182 pjoin(export_dir,'SubProcesses'), read_ps = False, npoints = 4)
1183
1184 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
1185 mp = False, loop_filter = True,MLOptions=MLOptions)
1186
1187 shell_name = None
1188 directories = misc.glob('P0_*', pjoin(export_dir, 'SubProcesses'))
1189 if directories and os.path.isdir(directories[0]):
1190 shell_name = os.path.basename(directories[0])
1191 dir_name = pjoin(export_dir, 'SubProcesses', shell_name)
1192 infos['dir_path']=dir_name
1193
1194
1195
1196 if not MadLoopInitializer.need_MadLoopInit(
1197 export_dir, subproc_prefix='P'):
1198 return infos
1199
1200 attempts = [3,15]
1201
1202 try:
1203 os.remove(pjoin(dir_name,'check'))
1204 os.remove(pjoin(dir_name,'check_sa.o'))
1205 except OSError:
1206 pass
1207
1208 nPS_necessary = MadLoopInitializer.run_initialization(dir_name,
1209 pjoin(export_dir,'SubProcesses'),infos,\
1210 req_files = ['HelFilter.dat','LoopFilter.dat'],
1211 attempts = attempts)
1212 if attempts is None:
1213 logger.error("Could not compile the process %s,"%shell_name+\
1214 " try to generate it via the 'generate' command.")
1215 return None
1216 if nPS_necessary is None:
1217 logger.error("Could not initialize the process %s"%shell_name+\
1218 " with %s PS points."%max(attempts))
1219 return None
1220 elif nPS_necessary > min(attempts):
1221 logger.warning("Could not initialize the process %s"%shell_name+\
1222 " with %d PS points. It needed %d."%(min(attempts),nPS_necessary))
1223
1224 return infos
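
    # Illustrative sketch (not part of the original class): the dictionary
    # returned by setup_process carries the output and compilation timings
    # gathered above, e.g.
    #
    #   infos = timer.setup_process(matrix_element, export_dir, reusing=False)
    #   if infos:
    #       print infos['Process_output'], infos['Initialization'], \
    #             infos['dir_path']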
1225
    def time_matrix_element(self, matrix_element, reusing=False,
                            param_card=None, keep_folder=False, options=None,
                            MLOptions={}):
        """ Output the matrix_element in argument and give detailed
        information about the timing of its output and running."""
1231
1232
1233
1234 make_it_quick=False
1235
1236 if options and 'split_orders' in options.keys():
1237 split_orders = options['split_orders']
1238 else:
1239 split_orders = -1
1240
1241 assert ((not reusing and isinstance(matrix_element, \
1242 helas_objects.HelasMatrixElement)) or (reusing and
1243 isinstance(matrix_element, base_objects.Process)))
1244 if not reusing:
1245 proc_name = matrix_element['processes'][0].shell_string()[2:]
1246 else:
1247 proc_name = matrix_element.shell_string()[2:]
1248
1249 export_dir=pjoin(self.output_path,('SAVED' if keep_folder else '')+\
1250 temp_dir_prefix+"_%s"%proc_name)
1251
1252 res_timings = self.setup_process(matrix_element,export_dir, \
1253 reusing, param_card,MLOptions = MLOptions,clean=True)
1254
1255 if res_timings == None:
1256 return None
1257 dir_name=res_timings['dir_path']
1258
1259 def check_disk_usage(path):
1260 return subprocess.Popen("du -shc -L "+str(path), \
1261 stdout=subprocess.PIPE, shell=True).communicate()[0].split()[-2]
1262
1263
1264
1265
1266 res_timings['du_source']=check_disk_usage(pjoin(\
1267 export_dir,'Source','*','*.f'))
1268 res_timings['du_process']=check_disk_usage(pjoin(dir_name,'*.f'))
1269 res_timings['du_color']=check_disk_usage(pjoin(dir_name,
1270 'MadLoop5_resources','*.dat'))
1271 res_timings['du_exe']=check_disk_usage(pjoin(dir_name,'check'))
1272
1273 if not res_timings['Initialization']==None:
1274 time_per_ps_estimate = (res_timings['Initialization']/4.0)/2.0
1275 elif make_it_quick:
1276 time_per_ps_estimate = -1.0
1277 else:
1278
1279
1280 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1281 read_ps = False, npoints = 3, hel_config = -1,
1282 split_orders=split_orders)
1283 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1284 time_per_ps_estimate = run_time/3.0
1285
1286 self.boot_time_setup(dir_name,bootandstop=True)
1287 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1288 res_timings['Booting_time'] = run_time
1289 self.boot_time_setup(dir_name,bootandstop=False)
1290
1291
1292 contributing_hel=0
1293 n_contrib_hel=0
1294 proc_prefix_file = open(pjoin(dir_name,'proc_prefix.txt'),'r')
1295 proc_prefix = proc_prefix_file.read()
1296 proc_prefix_file.close()
1297 helicities = file(pjoin(dir_name,'MadLoop5_resources',
1298 '%sHelFilter.dat'%proc_prefix)).read().split()
1299 for i, hel in enumerate(helicities):
1300 if (self.loop_optimized_output and int(hel)>-10000) or hel=='T':
1301 if contributing_hel==0:
1302 contributing_hel=i+1
1303 n_contrib_hel += 1
1304
1305 if contributing_hel==0:
1306 logger.error("Could not find a contributing helicity "+\
1307 "configuration for process %s."%proc_name)
1308 return None
1309
1310 res_timings['n_contrib_hel']=n_contrib_hel
1311 res_timings['n_tot_hel']=len(helicities)
1312
1313
1314 if not make_it_quick:
1315 target_pspoints_number = max(int(30.0/time_per_ps_estimate)+1,50)
1316 else:
1317 target_pspoints_number = 10
1318
1319 logger.info("Checking timing for process %s "%proc_name+\
1320 "with %d PS points."%target_pspoints_number)
1321
1322 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1323 read_ps = False, npoints = target_pspoints_number*2, \
1324 hel_config = contributing_hel, split_orders=split_orders)
1325 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1326
1327 if compile_time == None: return None
1328
1329 res_timings['run_polarized_total']=\
1330 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1331
1332 if make_it_quick:
1333 res_timings['run_unpolarized_total'] = 1.0
1334 res_timings['ram_usage'] = 0.0
1335 else:
1336 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1337 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1338 split_orders=split_orders)
1339 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name,
1340 checkRam=True)
1341
1342 if compile_time == None: return None
1343 res_timings['run_unpolarized_total']=\
1344 (run_time-res_timings['Booting_time'])/target_pspoints_number
1345 res_timings['ram_usage'] = ram_usage
1346
1347 if not self.loop_optimized_output:
1348 return res_timings
1349
1350
1351
1352
1353
1354 self.skip_loop_evaluation_setup(dir_name,skip=True)
1355
1356 if make_it_quick:
1357 res_timings['run_unpolarized_coefs'] = 1.0
1358 else:
1359 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1360 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1361 split_orders=split_orders)
1362 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1363 if compile_time == None: return None
1364 res_timings['run_unpolarized_coefs']=\
1365 (run_time-res_timings['Booting_time'])/target_pspoints_number
1366
1367 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1368 read_ps = False, npoints = target_pspoints_number*2, \
1369 hel_config = contributing_hel, split_orders=split_orders)
1370 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1371 if compile_time == None: return None
1372 res_timings['run_polarized_coefs']=\
1373 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1374
1375
1376 self.skip_loop_evaluation_setup(dir_name,skip=False)
1377
1378 return res_timings
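
    # Illustrative sketch (not part of the original class): a few of the keys
    # filled in the res_timings dictionary returned above.
    #
    #   timings = timer.time_matrix_element(matrix_element)
    #   if timings:
    #       print timings['run_polarized_total'], \
    #             timings['run_unpolarized_total'], timings['ram_usage']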
1379
1380
1381
1382
1383
    def check_matrix_element_stability(self, matrix_element, options=None,
                                       infos_IN=None, param_card=None,
                                       keep_folder=False, MLOptions={}):
        """ Output the matrix_element in argument, run it for nPoints and
        return a dictionary containing the stability information on each of
        these points. If infos are provided, then the matrix element output is
        skipped and reused from a previous run together with the content of
        infos.
        """
1392
1393 if not options:
1394 reusing = False
1395 nPoints = 100
1396 split_orders = -1
1397 else:
1398 reusing = options['reuse']
1399 nPoints = options['npoints']
1400 split_orders = options['split_orders']
1401
1402 assert ((not reusing and isinstance(matrix_element, \
1403 helas_objects.HelasMatrixElement)) or (reusing and
1404 isinstance(matrix_element, base_objects.Process)))
1405
1406
1407 def format_PS_point(ps, rotation=0):
1408 """ Write out the specified PS point to the file dir_path/PS.input
            while rotating it if rotation!=0. We consider only rotations of 90
            degrees, but one could think of having rotations of arbitrary angle
            too. The first possibilities (1, 2 and 3) are rotations and a boost
            along the z-axis, so that improve_ps can still work.
1413 rotation=0 => No rotation
1414 rotation=1 => Z-axis pi/2 rotation
1415 rotation=2 => Z-axis pi/4 rotation
1416 rotation=3 => Z-axis boost
1417 rotation=4 => (x'=z,y'=-x,z'=-y)
1418 rotation=5 => (x'=-z,y'=y,z'=x)"""
1419 if rotation==0:
1420 p_out=copy.copy(ps)
1421 elif rotation==1:
1422 p_out = [[pm[0],-pm[2],pm[1],pm[3]] for pm in ps]
1423 elif rotation==2:
1424 sq2 = math.sqrt(2.0)
1425 p_out = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in ps]
1426 elif rotation==3:
1427 p_out = boost_momenta(ps, 3)
1428
1429
1430 elif rotation==4:
1431 p_out=[[pm[0],pm[3],-pm[1],-pm[2]] for pm in ps]
1432 elif rotation==5:
1433 p_out=[[pm[0],-pm[3],pm[2],pm[1]] for pm in ps]
1434 else:
1435 raise MadGraph5Error("Rotation id %i not implemented"%rotation)
1436
1437 return '\n'.join([' '.join(['%.16E'%pi for pi in p]) for p in p_out])
1438
1439 def pick_PS_point(proc, options):
1440 """ Randomly generate a PS point and make sure it is eligible. Then
1441 return it. Users can edit the cuts here if they want."""
1442
1443 p, w_rambo = self.get_momenta(proc, options)
1444 if options['events']:
1445 return p
1446
1447 while (not MatrixElementEvaluator.pass_isolation_cuts(p) and len(p)>3):
1448 p, w_rambo = self.get_momenta(proc, options)
1449
1450
1451
1452
1453 if len(p)==3:
1454 p = boost_momenta(p,3,random.uniform(0.0,0.99))
1455 return p
1456
1457
1458
1459
1460 accuracy_threshold=1.0e-1
1461
1462
1463
1464 num_rotations = 1
1465
1466 if "MLReductionLib" not in MLOptions:
1467 tools=[1]
1468 else:
1469 tools=MLOptions["MLReductionLib"]
1470 tools=list(set(tools))
1471
1472
1473 tool_var={'pjfry':2,'golem':4,'samurai':5,'ninja':6}
1474 for tool in ['pjfry','golem','samurai','ninja']:
1475 tool_dir='%s_dir'%tool
1476 if not tool_dir in self.tir_dir:
1477 continue
1478 tool_libpath=self.tir_dir[tool_dir]
1479 tool_libname="lib%s.a"%tool
1480 if (not isinstance(tool_libpath,str)) or (not os.path.exists(tool_libpath)) \
1481 or (not os.path.isfile(pjoin(tool_libpath,tool_libname))):
1482 if tool_var[tool] in tools:
1483 tools.remove(tool_var[tool])
1484 if not tools:
1485 return None
1486
1487
1488 if not reusing:
1489 process = matrix_element['processes'][0]
1490 else:
1491 process = matrix_element
1492 proc_name = process.shell_string()[2:]
1493 export_dir=pjoin(self.mg_root,("SAVED" if keep_folder else "")+\
1494 temp_dir_prefix+"_%s"%proc_name)
1495
1496 tools_name={1:'CutTools',2:'PJFry++',3:'IREGI',4:'Golem95',5:'Samurai',
1497 6:'Ninja'}
1498 return_dict={}
1499 return_dict['Stability']={}
1500 infos_save={'Process_output': None,
1501 'HELAS_MODEL_compilation' : None,
1502 'dir_path' : None,
1503 'Initialization' : None,
1504 'Process_compilation' : None}
1505
1506 for tool in tools:
1507 tool_name=tools_name[tool]
1508
1509
1510
1511
1512
1513 DP_stability = []
1514 QP_stability = []
1515
1516 Unstable_PS_points = []
1517
1518 Exceptional_PS_points = []
1519
1520 MLoptions={}
1521 MLoptions["MLReductionLib"]=tool
1522 clean = (tool==tools[0]) and not nPoints==0
1523 if infos_IN==None or (tool_name not in infos_IN):
1524 infos=infos_IN
1525 else:
1526 infos=infos_IN[tool_name]
1527
1528 if not infos:
1529 infos = self.setup_process(matrix_element,export_dir, \
1530 reusing, param_card,MLoptions,clean)
1531 if not infos:
1532 return None
1533
1534 if clean:
1535 infos_save['Process_output']=infos['Process_output']
1536 infos_save['HELAS_MODEL_compilation']=infos['HELAS_MODEL_compilation']
1537 infos_save['dir_path']=infos['dir_path']
1538 infos_save['Process_compilation']=infos['Process_compilation']
1539 else:
1540 if not infos['Process_output']:
1541 infos['Process_output']=infos_save['Process_output']
1542 if not infos['HELAS_MODEL_compilation']:
1543 infos['HELAS_MODEL_compilation']=infos_save['HELAS_MODEL_compilation']
1544 if not infos['dir_path']:
1545 infos['dir_path']=infos_save['dir_path']
1546 if not infos['Process_compilation']:
1547 infos['Process_compilation']=infos_save['Process_compilation']
1548
1549 dir_path=infos['dir_path']
1550
1551
1552 savefile='SavedStabilityRun_%s%%s.pkl'%tools_name[tool]
1553 data_i = 0
1554
1555 if reusing:
1556
1557 data_i=0
1558 while os.path.isfile(pjoin(dir_path,savefile%('_%d'%data_i))):
1559 pickle_path = pjoin(dir_path,savefile%('_%d'%data_i))
1560 saved_run = save_load_object.load_from_file(pickle_path)
1561 if data_i>0:
1562 logger.info("Loading additional data stored in %s."%
1563 str(pickle_path))
1564 logger.info("Loaded data moved to %s."%str(pjoin(
1565 dir_path,'LOADED_'+savefile%('_%d'%data_i))))
1566 shutil.move(pickle_path,
1567 pjoin(dir_path,'LOADED_'+savefile%('%d'%data_i)))
1568 DP_stability.extend(saved_run['DP_stability'])
1569 QP_stability.extend(saved_run['QP_stability'])
1570 Unstable_PS_points.extend(saved_run['Unstable_PS_points'])
1571 Exceptional_PS_points.extend(saved_run['Exceptional_PS_points'])
1572 data_i += 1
1573
1574 return_dict['Stability'][tool_name] = {'DP_stability':DP_stability,
1575 'QP_stability':QP_stability,
1576 'Unstable_PS_points':Unstable_PS_points,
1577 'Exceptional_PS_points':Exceptional_PS_points}
1578
1579 if nPoints==0:
1580 if len(return_dict['Stability'][tool_name]['DP_stability'])!=0:
1581
1582 if data_i>1:
1583 save_load_object.save_to_file(pjoin(dir_path,
1584 savefile%'_0'),return_dict['Stability'][tool_name])
1585 continue
1586 else:
1587 logger.info("ERROR: Not reusing a directory or any pickled"+
1588 " result for tool %s and the number"%tool_name+\
1589 " of point for the check is zero.")
1590 return None
1591
1592 logger.info("Checking stability of process %s "%proc_name+\
1593 "with %d PS points by %s."%(nPoints,tool_name))
1594 if infos['Initialization'] != None:
1595 time_per_ps_estimate = (infos['Initialization']/4.0)/2.0
1596 sec_needed = int(time_per_ps_estimate*nPoints*4)
1597 else:
1598 sec_needed = 0
1599
1600 progress_bar = None
1601 time_info = False
1602 if sec_needed>5:
1603 time_info = True
1604 logger.info("This check should take about "+\
1605 "%s to run. Started on %s."%(\
1606 str(datetime.timedelta(seconds=sec_needed)),\
1607 datetime.datetime.now().strftime("%d-%m-%Y %H:%M")))
1608 if logger.getEffectiveLevel()<logging.WARNING and \
1609 (sec_needed>5 or (reusing and infos['Initialization'] == None)):
1610 widgets = ['Stability check:', pbar.Percentage(), ' ',
1611 pbar.Bar(),' ', pbar.ETA(), ' ']
1612 progress_bar = pbar.ProgressBar(widgets=widgets, maxval=nPoints,
1613 fd=sys.stdout)
1614 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1615 read_ps = True, npoints = 1, hel_config = -1, split_orders=split_orders)
1616
1617
1618
1619 try:
1620 os.remove(pjoin(dir_path,'check'))
1621 os.remove(pjoin(dir_path,'check_sa.o'))
1622 except OSError:
1623 pass
1624
1625 devnull = open(os.devnull, 'w')
1626 retcode = subprocess.call(['make','check'],
1627 cwd=dir_path, stdout=devnull, stderr=devnull)
1628 devnull.close()
1629 if retcode != 0:
1630 logging.info("Error while executing make in %s" % dir_path)
1631 return None
1632
1633
1634
1635
1636 if not os.path.isfile(pjoin(dir_path,'StabilityCheckDriver.f')):
1637
1638
1639 if os.path.isfile(pjoin(dir_path,'born_matrix.f')):
1640 checkerName = 'StabilityCheckDriver.f'
1641 else:
1642 checkerName = 'StabilityCheckDriver_loop_induced.f'
1643
1644 with open(pjoin(self.mg_root,'Template','loop_material','Checks',
1645 checkerName),'r') as checkerFile:
1646 with open(pjoin(dir_path,'proc_prefix.txt')) as proc_prefix:
1647 checkerToWrite = checkerFile.read()%{'proc_prefix':
1648 proc_prefix.read()}
1649 checkerFile = open(pjoin(dir_path,'StabilityCheckDriver.f'),'w')
1650 checkerFile.write(checkerToWrite)
1651 checkerFile.close()
1652
1653
1654
1655
1656
1657 if os.path.isfile(pjoin(dir_path,'StabilityCheckDriver')):
1658 os.remove(pjoin(dir_path,'StabilityCheckDriver'))
1659 if os.path.isfile(pjoin(dir_path,'loop_matrix.o')):
1660 os.remove(pjoin(dir_path,'loop_matrix.o'))
1661 misc.compile(arg=['StabilityCheckDriver'], cwd=dir_path, \
1662 mode='fortran', job_specs = False)
1663
1664
1665
1666
1667 if len(process['legs'])==3:
1668 self.fix_MadLoopParamCard(dir_path, mp=False,
1669 loop_filter=False, DoubleCheckHelicityFilter=True)
1670
1671 StabChecker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
1672 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1673 cwd=dir_path)
1674 start_index = len(DP_stability)
1675 if progress_bar!=None:
1676 progress_bar.start()
1677
1678
1679 interrupted = False
1680
1681
1682 retry = 0
1683
1684 i=start_index
1685 if options and 'events' in options and options['events']:
1686
1687 import MadSpin.decay as madspin
1688 fsock = open(options['events'])
1689 self.event_file = madspin.Event(fsock)
1690 while i<(start_index+nPoints):
1691
1692 qp_dict={}
1693 dp_dict={}
1694 UPS = None
1695 EPS = None
1696
1697 if retry==0:
1698 p = pick_PS_point(process, options)
1699
1700 try:
1701 if progress_bar!=None:
1702 progress_bar.update(i+1-start_index)
1703
1704 PSPoint = format_PS_point(p,0)
1705 dp_res=[]
1706 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1707 split_orders=split_orders))
1708 dp_dict['CTModeA']=dp_res[-1]
1709 dp_res.append(self.get_me_value(StabChecker,PSPoint,2,
1710 split_orders=split_orders))
1711 dp_dict['CTModeB']=dp_res[-1]
1712 for rotation in range(1,num_rotations+1):
1713 PSPoint = format_PS_point(p,rotation)
1714 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1715 split_orders=split_orders))
1716 dp_dict['Rotation%i'%rotation]=dp_res[-1]
1717
1718 if any([not res for res in dp_res]):
1719 return None
1720 dp_accuracy =((max(dp_res)-min(dp_res))/
1721 abs(sum(dp_res)/len(dp_res)))
1722 dp_dict['Accuracy'] = dp_accuracy
1723 if dp_accuracy>accuracy_threshold:
1724 if tool in [1,6]:
1725
1726 UPS = [i,p]
1727 qp_res=[]
1728 PSPoint = format_PS_point(p,0)
1729 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1730 split_orders=split_orders))
1731 qp_dict['CTModeA']=qp_res[-1]
1732 qp_res.append(self.get_me_value(StabChecker,PSPoint,5,
1733 split_orders=split_orders))
1734 qp_dict['CTModeB']=qp_res[-1]
1735 for rotation in range(1,num_rotations+1):
1736 PSPoint = format_PS_point(p,rotation)
1737 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1738 split_orders=split_orders))
1739 qp_dict['Rotation%i'%rotation]=qp_res[-1]
1740
1741 if any([not res for res in qp_res]):
1742 return None
1743
1744 qp_accuracy = ((max(qp_res)-min(qp_res))/
1745 abs(sum(qp_res)/len(qp_res)))
1746 qp_dict['Accuracy']=qp_accuracy
1747 if qp_accuracy>accuracy_threshold:
1748 EPS = [i,p]
1749 else:
1750
1751
1752 UPS = [i,p]
1753
1754 except KeyboardInterrupt:
1755 interrupted = True
1756 break
1757 except IOError, e:
1758 if e.errno == errno.EINTR:
1759 if retry==100:
1760 logger.error("Failed hundred times consecutively because"+
1761 " of system call interruptions.")
1762 raise
1763 else:
1764 logger.debug("Recovered from a system call interruption."+\
1765 "PSpoint #%i, Attempt #%i."%(i,retry+1))
1766
1767 time.sleep(0.5)
1768
1769 retry = retry+1
1770
1771 try:
1772 StabChecker.kill()
1773 except Exception:
1774 pass
1775 StabChecker = subprocess.Popen(\
1776 [pjoin(dir_path,'StabilityCheckDriver')],
1777 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
1778 stderr=subprocess.PIPE, cwd=dir_path)
1779 continue
1780 else:
1781 raise
1782
1783
1784
1785 retry = 0
1786
1787 i=i+1
1788
1789
1790 DP_stability.append(dp_dict)
1791 QP_stability.append(qp_dict)
1792 if not EPS is None:
1793 Exceptional_PS_points.append(EPS)
1794 if not UPS is None:
1795 Unstable_PS_points.append(UPS)
1796
1797 if progress_bar!=None:
1798 progress_bar.finish()
1799 if time_info:
1800 logger.info('Finished check on %s.'%datetime.datetime.now().strftime(\
1801 "%d-%m-%Y %H:%M"))
1802
1803
1804 if not interrupted:
1805 StabChecker.stdin.write('y\n')
1806 else:
1807 StabChecker.kill()
1808
1809
1810
1811
1812
1813
1814
1815 save_load_object.save_to_file(pjoin(dir_path,savefile%'_0'),\
1816 return_dict['Stability'][tool_name])
1817
1818 if interrupted:
1819 break
1820
1821 return_dict['Process'] = matrix_element.get('processes')[0] if not \
1822 reusing else matrix_element
1823 return return_dict
1824
1825 @classmethod
1826 - def get_me_value(cls, StabChecker, PSpoint, mode, hel=-1, mu_r=-1.0,
1827 split_orders=-1):
1828 """ This version of get_me_value is simplified for the purpose of this
1829 class. No compilation is necessary. The CT mode can be specified."""
1830
1831
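# Protocol with the StabilityCheckDriver process, as implemented below: send an
# EOF-like token, the command '1', the CT mode, the PS point, the renormalization
# scale, the helicity and the split_orders index, then read stdout until the
# ##TAG#RESULT_START#TAG## / ##TAG#RESULT_STOP#TAG## markers delimit the result.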
1832 StabChecker.stdin.write('\x1a')
1833 StabChecker.stdin.write('1\n')
1834 StabChecker.stdin.write('%d\n'%mode)
1835 StabChecker.stdin.write('%s\n'%PSpoint)
1836 StabChecker.stdin.write('%.16E\n'%mu_r)
1837 StabChecker.stdin.write('%d\n'%hel)
1838 StabChecker.stdin.write('%d\n'%split_orders)
1839
1840 try:
1841 while True:
1842 output = StabChecker.stdout.readline()
1843 if output != '':
1844 last_non_empty = output
1845 if output==' ##TAG#RESULT_START#TAG##\n':
1846 break
1847
1848 ret_code = StabChecker.poll()
1849 if not ret_code is None:
1850 output = StabChecker.stdout.readline()
1851 if output != '':
1852 last_non_empty = output
1853 error = StabChecker.stderr.readline()
1854 raise MadGraph5Error(
1855 "The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1856 (ret_code, last_non_empty, error))
1857
1858 res = ""
1859 while True:
1860 output = StabChecker.stdout.readline()
1861 if output != '':
1862 last_non_empty = output
1863 if output==' ##TAG#RESULT_STOP#TAG##\n':
1864 break
1865 else:
1866 res += output
1867 ret_code = StabChecker.poll()
1868 if not ret_code is None:
1869 output = StabChecker.stdout.readline()
1870 if output != '':
1871 last_non_empty = output
1872 error = StabChecker.stderr.readline()
1873 raise MadGraph5Error(
1874 "The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1875 (ret_code, last_non_empty, error))
1876
1877 return cls.parse_check_output(res,format='tuple')[0][0]
1878 except IOError as e:
1879 logging.warning("Error while running MadLoop. Exception = %s"%str(e))
1880 raise e
1881
1884 """ Perform a python evaluation of the matrix element independently for
1885 all possible helicity configurations for a fixed number of points N and
1886 returns the average for each in the format [[hel_config, eval],...].
1887 This is used to determine what are the vanishing and dependent helicity
1888 configurations at generation time and accordingly setup the output.
1889 This is not yet implemented at LO."""
1890
1891
1892 assert isinstance(process,base_objects.Process)
1893 assert process.get('perturbation_couplings')==[]
1894
1895 N_eval=50
1896
1897 evaluator = MatrixElementEvaluator(process.get('model'), param_card,
1898 auth_skipping = False, reuse = True)
1899
1900 amplitude = diagram_generation.Amplitude(process)
1901 matrix_element = helas_objects.HelasMatrixElement(amplitude,gen_color=False)
1902
1903 cumulative_helEvals = []
1904
1905 for i in range(N_eval):
1906 p, w_rambo = evaluator.get_momenta(process)
1907 helEvals = evaluator.evaluate_matrix_element(\
1908 matrix_element, p = p, output = 'helEvals')['helEvals']
1909 if cumulative_helEvals==[]:
1910 cumulative_helEvals=copy.copy(helEvals)
1911 else:
1912 cumulative_helEvals = [[h[0],h[1]+helEvals[i][1]] for i, h in \
1913 enumerate(cumulative_helEvals)]
1914
1915
1916 cumulative_helEvals = [[h[0],h[1]/N_eval] for h in cumulative_helEvals]
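    # At this point cumulative_helEvals holds, for every helicity configuration,
    # its matrix-element value averaged over the N_eval random PS points; vanishing
    # entries signal helicity configurations that can be dropped from the output.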
1917
1918
1919
1920 clean_added_globals(ADDED_GLOBAL)
1921
1922 return cumulative_helEvals
1923
1926 """A wrapper function for running an iteration of a function over
1927 a multiprocess, without having to first create a process list
1928 (which makes a big difference for very large multiprocesses).
1929 stored_quantities is a dictionary for any quantities that we want
1930 to reuse between runs."""
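    # Note on the expected interface (from the calls further below): 'function' must
    # accept (process, stored_quantities[, opt], options=...); when 'opt' is a
    # dictionary it is keyed by process.base_string() and only the matching entry is
    # forwarded to 'function'.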
1931
1932 model = multiprocess.get('model')
1933 isids = [leg.get('ids') for leg in multiprocess.get('legs') \
1934 if not leg.get('state')]
1935 fsids = [leg.get('ids') for leg in multiprocess.get('legs') \
1936 if leg.get('state')]
1937
1938 id_anti_id_dict = {}
1939 for id in set(tuple(sum(isids+fsids, []))):
1940 id_anti_id_dict[id] = model.get_particle(id).get_anti_pdg_code()
1941 id_anti_id_dict[model.get_particle(id).get_anti_pdg_code()] = id
1942 sorted_ids = []
1943 results = []
1944 for is_prod in itertools.product(*isids):
1945 for fs_prod in itertools.product(*fsids):
1946
1947
1948 if check_already_checked(is_prod, fs_prod, sorted_ids,
1949 multiprocess, model, id_anti_id_dict):
1950 continue
1951
1952 process = multiprocess.get_process_with_legs(base_objects.LegList(\
1953 [base_objects.Leg({'id': id, 'state':False}) for \
1954 id in is_prod] + \
1955 [base_objects.Leg({'id': id, 'state':True}) for \
1956 id in fs_prod]))
1957
1958 if opt is not None:
1959 if isinstance(opt, dict):
1960 try:
1961 value = opt[process.base_string()]
1962 except Exception:
1963 continue
1964 result = function(process, stored_quantities, value, options=options)
1965 else:
1966 result = function(process, stored_quantities, opt, options=options)
1967 else:
1968 result = function(process, stored_quantities, options=options)
1969
1970 if result:
1971 results.append(result)
1972
1973 return results
1974
1975
1976
1977
1978
1979 -def check_already_checked(is_ids, fs_ids, sorted_ids, process, model,
1980 id_anti_id_dict = {}):
1981 """Check if process already checked, if so return True, otherwise add
1982 process and antiprocess to sorted_ids."""
1983
1984
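    # The process is canonicalized by replacing the initial-state ids with their
    # antiparticles (so that crossings map onto the same key), sorting them together
    # with the final-state ids and appending the process id; this compact array is
    # what gets stored in and looked up from sorted_ids.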
1985 if id_anti_id_dict:
1986 is_ids = [id_anti_id_dict[id] for id in \
1987 is_ids]
1988 else:
1989 is_ids = [model.get_particle(id).get_anti_pdg_code() for id in \
1990 is_ids]
1991
1992 ids = array.array('i', sorted(is_ids + list(fs_ids)) + \
1993 [process.get('id')])
1994
1995 if ids in sorted_ids:
1996
1997 return True
1998
1999
2000 sorted_ids.append(ids)
2001
2002
2003 return False
2004
2010 """ Generate a loop matrix element from the process definition, and returns
2011 it along with the timing information dictionary.
2012 If reuse is True, it reuses the already output directory if found.
2013 There is the possibility of specifying the proc_name."""
2014
2015 assert isinstance(process_definition,
2016 (base_objects.ProcessDefinition,base_objects.Process))
2017 assert process_definition.get('perturbation_couplings')!=[]
2018
2019 if isinstance(process_definition,base_objects.ProcessDefinition):
2020 if any(len(l.get('ids'))>1 for l in process_definition.get('legs')):
2021 raise InvalidCmd("This check can only be performed on single "+
2022 "processes (i.e. without multiparticle labels).")
2023
2024 isids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2025 if not leg.get('state')]
2026 fsids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2027 if leg.get('state')]
2028
2029
2030 process = process_definition.get_process(isids,fsids)
2031 else:
2032 process = process_definition
2033
2034 if not output_path is None:
2035 root_path = output_path
2036 else:
2037 root_path = cmd._mgme_dir
2038
2039 timing = {'Diagrams_generation': None,
2040 'n_loops': None,
2041 'HelasDiagrams_generation': None,
2042 'n_loop_groups': None,
2043 'n_loop_wfs': None,
2044 'loop_wfs_ranks': None}
2045
2046 if proc_name:
2047 proc_dir = pjoin(root_path,proc_name)
2048 else:
2049 proc_dir = pjoin(root_path,"SAVED"+temp_dir_prefix+"_%s"%(
2050 '_'.join(process.shell_string().split('_')[1:])))
2051 if reuse and os.path.isdir(proc_dir):
2052 logger.info("Reusing directory %s"%str(proc_dir))
2053
2054 return timing, process
2055
2056 logger.info("Generating p%s"%process_definition.nice_string()[1:])
2057
2058 start=time.time()
2059 try:
2060 amplitude = loop_diagram_generation.LoopAmplitude(process,
2061 loop_filter=loop_filter)
2062 except InvalidCmd:
2063
2064
2065 return time.time()-start, None
2066 if not amplitude.get('diagrams'):
2067
2068 return time.time()-start, None
2069
2070
2071
2072 loop_optimized_output = cmd.options['loop_optimized_output']
2073 timing['Diagrams_generation']=time.time()-start
2074 timing['n_loops']=len(amplitude.get('loop_diagrams'))
2075 start=time.time()
2076
2077 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2078 optimized_output = loop_optimized_output,gen_color=True)
2079
2080
2081
2082 matrix_element.compute_all_analytic_information()
2083 timing['HelasDiagrams_generation']=time.time()-start
2084
2085 if loop_optimized_output:
2086 timing['n_loop_groups']=len(matrix_element.get('loop_groups'))
2087 lwfs=[l for ldiag in matrix_element.get_loop_diagrams() for l in \
2088 ldiag.get('loop_wavefunctions')]
2089 timing['n_loop_wfs']=len(lwfs)
2090 timing['loop_wfs_ranks']=[]
2091 for rank in range(0,max([l.get_analytic_info('wavefunction_rank') \
2092 for l in lwfs])+1):
2093 timing['loop_wfs_ranks'].append(\
2094 len([1 for l in lwfs if \
2095 l.get_analytic_info('wavefunction_rank')==rank]))
2096
2097 return timing, matrix_element
2098
2099
2100
2101
2102 -def check_profile(process_definition, param_card = None,cuttools="",tir={},
2103 options = {}, cmd = FakeInterface(),output_path=None,MLOptions={}):
2104 """For a single loop process, check both its timings and then its stability
2105 in one go without regenerating it."""
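    # Minimal usage sketch (the interface object and the CutTools path are
    # assumptions, not defined in this module):
    #   timing, stability = check_profile(my_procdef, cuttools='/path/to/CutTools',
    #                                     options={'reuse': False}, cmd=my_interface)
    # Both returned values are None whenever the timing or the stability run fails.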
2106
2107 if 'reuse' not in options:
2108 keep_folder=False
2109 else:
2110 keep_folder = options['reuse']
2111
2112 model=process_definition.get('model')
2113
2114 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2115 keep_folder,output_path=output_path,cmd=cmd)
2116 reusing = isinstance(matrix_element, base_objects.Process)
2117 options['reuse'] = reusing
2118 myProfiler = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2119 model=model, output_path=output_path, cmd=cmd)
2120
2121 if not myProfiler.loop_optimized_output:
2122 MLoptions={}
2123 else:
2124 MLoptions=MLOptions
2125
2126 timing2 = myProfiler.time_matrix_element(matrix_element, reusing,
2127 param_card, keep_folder=keep_folder,options=options,
2128 MLOptions = MLoptions)
2129
2130 if timing2 == None:
2131 return None, None
2132
2133
2134 timing = dict(timing1.items()+timing2.items())
2135 stability = myProfiler.check_matrix_element_stability(matrix_element,
2136 options=options, infos_IN=timing,param_card=param_card,
2137 keep_folder = keep_folder,
2138 MLOptions = MLoptions)
2139 if stability == None:
2140 return None, None
2141 else:
2142 timing['loop_optimized_output']=myProfiler.loop_optimized_output
2143 stability['loop_optimized_output']=myProfiler.loop_optimized_output
2144 return timing, stability
2145
2146
2147
2148
2149 -def check_stability(process_definition, param_card = None,cuttools="",tir={},
2150 options=None,nPoints=100, output_path=None,
2151 cmd = FakeInterface(), MLOptions = {}):
2152 """For a single loop process, give a detailed summary of the generation and
2153 execution timing."""
2154
2155 if "reuse" in options:
2156 reuse=options['reuse']
2157 else:
2158 reuse=False
2159
2160 reuse=options['reuse']
2161 keep_folder = reuse
2162 model=process_definition.get('model')
2163
2164 timing, matrix_element = generate_loop_matrix_element(process_definition,
2165 reuse, output_path=output_path, cmd=cmd)
2166 reusing = isinstance(matrix_element, base_objects.Process)
2167 options['reuse'] = reusing
2168 myStabilityChecker = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2169 output_path=output_path,model=model,cmd=cmd)
2170
2171 if not myStabilityChecker.loop_optimized_output:
2172 MLoptions = {}
2173 else:
2174 MLoptions = MLOptions
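# Integer codes used for MadLoop's MLReductionLib, as assigned just below:
# 1=CutTools, 2=PJFry++, 3=IREGI, 4=Golem95, 5=Samurai, 6=Ninja.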
2175 if "MLReductionLib" not in MLOptions:
2176 MLoptions["MLReductionLib"] = []
2177 if cuttools:
2178 MLoptions["MLReductionLib"].extend([1])
2179 if "iregi_dir" in tir:
2180 MLoptions["MLReductionLib"].extend([3])
2181 if "pjfry_dir" in tir:
2182 MLoptions["MLReductionLib"].extend([2])
2183 if "golem_dir" in tir:
2184 MLoptions["MLReductionLib"].extend([4])
2185 if "samurai_dir" in tir:
2186 MLoptions["MLReductionLib"].extend([5])
2187 if "ninja_dir" in tir:
2188 MLoptions["MLReductionLib"].extend([6])
2189
2190 stability = myStabilityChecker.check_matrix_element_stability(matrix_element,
2191 options=options,param_card=param_card,
2192 keep_folder=keep_folder,
2193 MLOptions=MLoptions)
2194
2195 if stability == None:
2196 return None
2197 else:
2198 stability['loop_optimized_output']=myStabilityChecker.loop_optimized_output
2199 return stability
2200
2201
2202
2203
2204 -def check_timing(process_definition, param_card= None, cuttools="",tir={},
2205 output_path=None, options={}, cmd = FakeInterface(),
2206 MLOptions = {}):
2207 """For a single loop process, give a detailed summary of the generation and
2208 execution timing."""
2209
2210 if 'reuse' not in options:
2211 keep_folder = False
2212 else:
2213 keep_folder = options['reuse']
2214 model=process_definition.get('model')
2215 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2216 keep_folder, output_path=output_path, cmd=cmd)
2217 reusing = isinstance(matrix_element, base_objects.Process)
2218 options['reuse'] = reusing
2219 myTimer = LoopMatrixElementTimer(cuttools_dir=cuttools,model=model,tir_dir=tir,
2220 output_path=output_path, cmd=cmd)
2221
2222 if not myTimer.loop_optimized_output:
2223 MLoptions = {}
2224 else:
2225 MLoptions = MLOptions
2226 timing2 = myTimer.time_matrix_element(matrix_element, reusing, param_card,
2227 keep_folder = keep_folder, options=options,
2228 MLOptions = MLoptions)
2229
2230 if timing2 == None:
2231 return None
2232 else:
2233
2234 res = dict(timing1.items()+timing2.items())
2235 res['loop_optimized_output']=myTimer.loop_optimized_output
2236 return res
2237
2238
2239
2240
2241 -def check_processes(processes, param_card = None, quick = [],cuttools="",tir={},
2242 options=None, reuse = False, output_path=None, cmd = FakeInterface()):
2243 """Check processes by generating them with all possible orderings
2244 of particles (which means different diagram building and Helas
2245 calls), and comparing the resulting matrix element values."""
2246
2247 cmass_scheme = cmd.options['complex_mass_scheme']
2248 if isinstance(processes, base_objects.ProcessDefinition):
2249
2250
2251 multiprocess = processes
2252 model = multiprocess.get('model')
2253
2254
2255 if multiprocess.get('perturbation_couplings')==[]:
2256 evaluator = MatrixElementEvaluator(model,
2257 auth_skipping = True, reuse = False, cmd = cmd)
2258 else:
2259 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
2260 model=model, auth_skipping = True,
2261 reuse = False, output_path=output_path, cmd = cmd)
2262
2263 results = run_multiprocs_no_crossings(check_process,
2264 multiprocess,
2265 evaluator,
2266 quick,
2267 options)
2268
2269 if "used_lorentz" not in evaluator.stored_quantities:
2270 evaluator.stored_quantities["used_lorentz"] = []
2271
2272 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
2273
2274 clean_up(output_path)
2275
2276 return results, evaluator.stored_quantities["used_lorentz"]
2277
2278 elif isinstance(processes, base_objects.Process):
2279 processes = base_objects.ProcessList([processes])
2280 elif isinstance(processes, base_objects.ProcessList):
2281 pass
2282 else:
2283 raise InvalidCmd("processes is of non-supported format")
2284
2285 if not processes:
2286 raise InvalidCmd("No processes given")
2287
2288 model = processes[0].get('model')
2289
2290
2291 if processes[0].get('perturbation_couplings')==[]:
2292 evaluator = MatrixElementEvaluator(model, param_card,
2293 auth_skipping = True, reuse = False, cmd = cmd)
2294 else:
2295 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
2296 model=model,param_card=param_card,
2297 auth_skipping = True, reuse = False,
2298 output_path=output_path, cmd = cmd)
2299
2300
2301
2302 sorted_ids = []
2303 comparison_results = []
2304
2305
2306 for process in processes:
2307
2308
2309 if check_already_checked([l.get('id') for l in process.get('legs') if \
2310 not l.get('state')],
2311 [l.get('id') for l in process.get('legs') if \
2312 l.get('state')],
2313 sorted_ids, process, model):
2314 continue
2315
2316 res = check_process(process, evaluator, quick, options)
2317 if res:
2318 comparison_results.append(res)
2319
2320 if "used_lorentz" not in evaluator.stored_quantities:
2321 evaluator.stored_quantities["used_lorentz"] = []
2322
2323 if processes[0].get('perturbation_couplings')!=[] and not reuse:
2324
2325 clean_up(output_path)
2326
2327 return comparison_results, evaluator.stored_quantities["used_lorentz"]
2328
2330 """Check the helas calls for a process by generating the process
2331 using all different permutations of the process legs (or, if
2332 quick, use a subset of permutations), and check that the matrix
2333 element is invariant under this."""
2334
2335 model = process.get('model')
2336
2337
2338 for i, leg in enumerate(process.get('legs')):
2339 leg.set('number', i+1)
2340
2341 logger.info("Checking crossings of %s" % \
2342 process.nice_string().replace('Process:', 'process'))
2343
2344 process_matrix_elements = []
2345
2346
2347
2348 if quick:
2349 leg_positions = [[] for leg in process.get('legs')]
2350 quick = range(1,len(process.get('legs')) + 1)
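    # In quick mode, leg_positions[n-1] records the positions at which leg number n
    # has already been tested; a permutation is evaluated only if it places at least
    # one leg at a position not seen before, which keeps the number of permutations
    # manageable for large multiplicities.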
2351
2352 values = []
2353
2354
2355 number_checked=0
2356 for legs in itertools.permutations(process.get('legs')):
2357
2358 order = [l.get('number') for l in legs]
2359 if quick:
2360 found_leg = True
2361 for num in quick:
2362
2363
2364 leg_position = legs.index([l for l in legs if \
2365 l.get('number') == num][0])
2366
2367 if not leg_position in leg_positions[num-1]:
2368 found_leg = False
2369 leg_positions[num-1].append(leg_position)
2370
2371 if found_leg:
2372 continue
2373
2374
2375
2376 if quick and process.get('perturbation_couplings') and number_checked >3:
2377 continue
2378
2379 legs = base_objects.LegList(legs)
2380
2381 if order != range(1,len(legs) + 1):
2382 logger.info("Testing permutation: %s" % \
2383 order)
2384
2385 newproc = copy.copy(process)
2386 newproc.set('legs',legs)
2387
2388
2389 try:
2390 if newproc.get('perturbation_couplings')==[]:
2391 amplitude = diagram_generation.Amplitude(newproc)
2392 else:
2393
2394 loop_base_objects.cutting_method = 'optimal' if \
2395 number_checked%2 == 0 else 'default'
2396 amplitude = loop_diagram_generation.LoopAmplitude(newproc)
2397 except InvalidCmd:
2398 result=False
2399 else:
2400 result = amplitude.get('diagrams')
2401
2402 loop_base_objects.cutting_method = 'optimal'
2403
2404 if not result:
2405
2406 logging.info("No diagrams for %s" % \
2407 process.nice_string().replace('Process', 'process'))
2408 break
2409
2410 if order == range(1,len(legs) + 1):
2411
2412 p, w_rambo = evaluator.get_momenta(process, options)
2413
2414
2415 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
2416 matrix_element = helas_objects.HelasMatrixElement(amplitude,
2417 gen_color=False)
2418 else:
2419 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2420 optimized_output=evaluator.loop_optimized_output)
2421
2422
2423
2424
2425 if amplitude.get('process').get('has_born'):
2426
2427
2428 if matrix_element in process_matrix_elements:
2429
2430
2431 continue
2432
2433 process_matrix_elements.append(matrix_element)
2434
2435 res = evaluator.evaluate_matrix_element(matrix_element, p = p,
2436 options=options)
2437 if res == None:
2438 break
2439
2440 values.append(res[0])
2441 number_checked += 1
2442
2443
2444
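    # Stop early if the relative spread 2|max-min|/(|max|+|min|) across the
    # permutations already exceeds 1%: the crossing check has clearly failed and
    # further permutations would not change the verdict.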
2445 if abs(max(values)) + abs(min(values)) > 0 and \
2446 2 * abs(max(values) - min(values)) / \
2447 (abs(max(values)) + abs(min(values))) > 0.01:
2448 break
2449
2450
2451 if not values:
2452 return None
2453
2454
2455
2456 diff = 0
2457 if abs(max(values)) + abs(min(values)) > 0:
2458 diff = 2* abs(max(values) - min(values)) / \
2459 (abs(max(values)) + abs(min(values)))
2460
2461
2462 if process.get('perturbation_couplings'):
2463 passed = diff < 1.e-5
2464 else:
2465 passed = diff < 1.e-8
2466
2467 return {"process": process,
2468 "momenta": p,
2469 "values": values,
2470 "difference": diff,
2471 "passed": passed}
2472
2474 """Clean-up the possible left-over outputs from 'evaluate_matrix element' of
2475 the LoopMatrixEvaluator (when its argument proliferate is set to true). """
2476
2477 if mg_root is None:
2478 return  # nothing to clean up when no root path is given
2479
2480 directories = misc.glob('%s*' % temp_dir_prefix, mg_root)
2481 if directories != []:
2482 logger.debug("Cleaning temporary %s* check runs."%temp_dir_prefix)
2483 for dir in directories:
2484
2485 if os.path.isdir(pjoin(dir,'SubProcesses')):
2486 shutil.rmtree(dir)
2487
2496
2497 -def output_profile(myprocdef, stability, timing, output_path, reusing=False):
2498 """Present the results from a timing and stability consecutive check"""
2499
2500 opt = timing['loop_optimized_output']
2501
2502 text = 'Timing result for the '+('optimized' if opt else 'default')+\
2503 ' output:\n'
2504 text += output_timings(myprocdef,timing)
2505
2506 text += '\nStability result for the '+('optimized' if opt else 'default')+\
2507 ' output:\n'
2508 text += output_stability(stability,output_path, reusing=reusing)
2509
2510 mode = 'optimized' if opt else 'default'
2511 logFilePath = pjoin(output_path, 'profile_%s_%s.log'\
2512 %(mode,stability['Process'].shell_string()))
2513 logFile = open(logFilePath, 'w')
2514 logFile.write(text)
2515 logFile.close()
2516 logger.info('Log of this profile check was output to file %s'\
2517 %str(logFilePath))
2518 return text
2519
2521 """Present the result of a stability check in a nice format.
2522 The full info is printed out in 'Stability_result_<proc_shell_string>.dat'
2523 under the MadGraph5_aMC@NLO root folder (output_path)"""
2524
2525 def accuracy(eval_list):
2526 """ Compute the accuracy from different evaluations."""
2527 return (2.0*(max(eval_list)-min(eval_list))/
2528 abs(max(eval_list)+min(eval_list)))
2529
2530 def best_estimate(eval_list):
2531 """ Returns the best estimate from different evaluations."""
2532 return (max(eval_list)+min(eval_list))/2.0
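        # Illustrative example: for eval_list = [0.99, 1.01] the helpers above give
        # accuracy = 2*0.02/2.00 = 0.02 and best_estimate = 1.00.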
2533
2534 def loop_direction_test_power(eval_list):
2535 """ Computes the loop direction test power P is computed as follow:
2536 P = accuracy(loop_dir_test) / accuracy(all_test)
2537 So that P is large if the loop direction test is effective.
2538 The tuple returned is (log(median(P)),log(min(P)),frac)
2539 where frac is the fraction of events with powers smaller than -3
2540 which means events for which the reading direction test shows an
2541 accuracy three digits higher than it really is according to the other
2542 tests."""
2543 powers=[]
2544 for eval in eval_list:
2545 loop_dir_evals = [eval['CTModeA'],eval['CTModeB']]
2546
2547 other_evals = [eval[key] for key in eval.keys() if key not in \
2548 ['CTModeB','Accuracy']]
2549 if accuracy(other_evals)!=0.0 and accuracy(loop_dir_evals)!=0.0:
2550 powers.append(accuracy(loop_dir_evals)/accuracy(other_evals))
2551
2552 n_fail=0
2553 for p in powers:
2554 if (math.log(p)/math.log(10))<-3:
2555 n_fail+=1
2556
2557 if len(powers)==0:
2558 return (None,None,None)
2559
2560 return (math.log(median(powers))/math.log(10),
2561 math.log(min(powers))/math.log(10),
2562 n_fail/len(powers))
2563
2564 def test_consistency(dp_eval_list, qp_eval_list):
2565 """ Computes the consistency test C from the DP and QP evaluations.
2566 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2567 So a consistent test would have C as close to one as possible.
2568 The tuple returned is (log(median(C)),log(min(C)),log(max(C)))"""
2569 consistencies = []
2570 for dp_eval, qp_eval in zip(dp_eval_list,qp_eval_list):
2571 dp_evals = [dp_eval[key] for key in dp_eval.keys() \
2572 if key!='Accuracy']
2573 qp_evals = [qp_eval[key] for key in qp_eval.keys() \
2574 if key!='Accuracy']
2575 if (abs(best_estimate(qp_evals)-best_estimate(dp_evals)))!=0.0 and \
2576 accuracy(dp_evals)!=0.0:
2577 consistencies.append(accuracy(dp_evals)/(abs(\
2578 best_estimate(qp_evals)-best_estimate(dp_evals))))
2579
2580 if len(consistencies)==0:
2581 return (None,None,None)
2582
2583 return (math.log(median(consistencies))/math.log(10),
2584 math.log(min(consistencies))/math.log(10),
2585 math.log(max(consistencies))/math.log(10))
2586
2587 def median(orig_list):
2588 """ Find the median of a sorted float list. """
2589 list=copy.copy(orig_list)
2590 list.sort()
2591 if len(list)%2==0:
2592 return (list[int((len(list)/2)-1)]+list[int(len(list)/2)])/2.0
2593 else:
2594 return list[int((len(list)-1)/2)]
2595
2596
2597 f = format_output
2598
2599 opt = stability['loop_optimized_output']
2600
2601 mode = 'optimized' if opt else 'default'
2602 process = stability['Process']
2603 res_str = "Stability checking for %s (%s mode)\n"\
2604 %(process.nice_string()[9:],mode)
2605
2606 logFile = open(pjoin(output_path, 'stability_%s_%s.log'\
2607 %(mode,process.shell_string())), 'w')
2608
2609 logFile.write('Stability check results\n\n')
2610 logFile.write(res_str)
2611 data_plot_dict={}
2612 accuracy_dict={}
2613 nPSmax=0
2614 max_acc=0.0
2615 min_acc=1.0
2616 if stability['Stability']:
2617 toolnames= stability['Stability'].keys()
2618 toolnamestr=" | ".join(tn+
2619 ''.join([' ']*(10-len(tn))) for tn in toolnames)
2620 DP_stability = [[eval['Accuracy'] for eval in stab['DP_stability']] \
2621 for key,stab in stability['Stability'].items()]
2622 med_dp_stab_str=" | ".join([f(median(dp_stab),'%.2e ') for dp_stab in DP_stability])
2623 min_dp_stab_str=" | ".join([f(min(dp_stab),'%.2e ') for dp_stab in DP_stability])
2624 max_dp_stab_str=" | ".join([f(max(dp_stab),'%.2e ') for dp_stab in DP_stability])
2625 UPS = [stab['Unstable_PS_points'] for key,stab in stability['Stability'].items()]
2626 res_str_i = "\n= Tool (DoublePrec for CT)....... %s\n"%toolnamestr
2627 len_PS=["%i"%len(evals)+\
2628 ''.join([' ']*(10-len("%i"%len(evals)))) for evals in DP_stability]
2629 len_PS_str=" | ".join(len_PS)
2630 res_str_i += "|= Number of PS points considered %s\n"%len_PS_str
2631 res_str_i += "|= Median accuracy............... %s\n"%med_dp_stab_str
2632 res_str_i += "|= Max accuracy.................. %s\n"%min_dp_stab_str
2633 res_str_i += "|= Min accuracy.................. %s\n"%max_dp_stab_str
2634 pmedminlist=[]
2635 pfraclist=[]
2636 for key,stab in stability['Stability'].items():
2637 (pmed,pmin,pfrac)=loop_direction_test_power(stab['DP_stability'])
2638 ldtest_str = "%s,%s"%(f(pmed,'%.1f'),f(pmin,'%.1f'))
2639 pfrac_str = f(pfrac,'%.2e')
2640 pmedminlist.append(ldtest_str+''.join([' ']*(10-len(ldtest_str))))
2641 pfraclist.append(pfrac_str+''.join([' ']*(10-len(pfrac_str))))
2642 pmedminlist_str=" | ".join(pmedminlist)
2643 pfraclist_str=" | ".join(pfraclist)
2644 res_str_i += "|= Overall DP loop_dir test power %s\n"%pmedminlist_str
2645 res_str_i += "|= Fraction of evts with power<-3 %s\n"%pfraclist_str
2646 len_UPS=["%i"%len(upup)+\
2647 ''.join([' ']*(10-len("%i"%len(upup)))) for upup in UPS]
2648 len_UPS_str=" | ".join(len_UPS)
2649 res_str_i += "|= Number of Unstable PS points %s\n"%len_UPS_str
2650 res_str_i += \
2651 """
2652 = Legend for the statistics of the stability tests. (all logs below are log_10)
2653 The loop direction test power P is computed as follows:
2654 P = accuracy(loop_dir_test) / accuracy(all_other_test)
2655 So that log(P) is positive if the loop direction test is effective.
2656 The tuple printed out is (log(median(P)),log(min(P)))
2657 The consistency test C is computed when QP evaluations are available:
2658 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2659 So a consistent test would have log(C) as close to zero as possible.
2660 The tuple printed out is (log(median(C)),log(min(C)),log(max(C)))\n"""
2661 res_str+=res_str_i
2662 for key in stability['Stability'].keys():
2663 toolname=key
2664 stab=stability['Stability'][key]
2665 DP_stability = [eval['Accuracy'] for eval in stab['DP_stability']]
2666
2667 QP_stability = [eval['Accuracy'] if eval!={} else -1.0 for eval in \
2668 stab['QP_stability']]
2669 nPS = len(DP_stability)
2670 if nPS>nPSmax:nPSmax=nPS
2671 UPS = stab['Unstable_PS_points']
2672 UPS_stability_DP = [DP_stability[U[0]] for U in UPS]
2673 UPS_stability_QP = [QP_stability[U[0]] for U in UPS]
2674 EPS = stab['Exceptional_PS_points']
2675 EPS_stability_DP = [DP_stability[E[0]] for E in EPS]
2676 EPS_stability_QP = [QP_stability[E[0]] for E in EPS]
2677 res_str_i = ""
2678
2679 xml_toolname = {'GOLEM95':'GOLEM','IREGI':'IREGI',
2680 'CUTTOOLS':'CUTTOOLS','PJFRY++':'PJFRY',
2681 'NINJA':'NINJA','SAMURAI':'SAMURAI'}[toolname.upper()]
2682 if len(UPS)>0:
2683 res_str_i = "\nDetails of the %d/%d UPS encountered by %s\n"\
2684 %(len(UPS),nPS,toolname)
2685 prefix = 'DP' if toolname=='CutTools' else ''
2686 res_str_i += "|= %s Median inaccuracy.......... %s\n"\
2687 %(prefix,f(median(UPS_stability_DP),'%.2e'))
2688 res_str_i += "|= %s Max accuracy............... %s\n"\
2689 %(prefix,f(min(UPS_stability_DP),'%.2e'))
2690 res_str_i += "|= %s Min accuracy............... %s\n"\
2691 %(prefix,f(max(UPS_stability_DP),'%.2e'))
2692 (pmed,pmin,pfrac)=loop_direction_test_power(\
2693 [stab['DP_stability'][U[0]] for U in UPS])
2694 if toolname=='CutTools':
2695 res_str_i += "|= UPS DP loop_dir test power.... %s,%s\n"\
2696 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2697 res_str_i += "|= UPS DP fraction with power<-3. %s\n"\
2698 %f(pfrac,'%.2e')
2699 res_str_i += "|= QP Median accuracy............ %s\n"\
2700 %f(median(UPS_stability_QP),'%.2e')
2701 res_str_i += "|= QP Max accuracy............... %s\n"\
2702 %f(min(UPS_stability_QP),'%.2e')
2703 res_str_i += "|= QP Min accuracy............... %s\n"\
2704 %f(max(UPS_stability_QP),'%.2e')
2705 (pmed,pmin,pfrac)=loop_direction_test_power(\
2706 [stab['QP_stability'][U[0]] for U in UPS])
2707 res_str_i += "|= UPS QP loop_dir test power.... %s,%s\n"\
2708 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2709 res_str_i += "|= UPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2710 (pmed,pmin,pmax)=test_consistency(\
2711 [stab['DP_stability'][U[0]] for U in UPS],
2712 [stab['QP_stability'][U[0]] for U in UPS])
2713 res_str_i += "|= DP vs QP stab test consistency %s,%s,%s\n"\
2714 %(f(pmed,'%.1f'),f(pmin,'%.1f'),f(pmax,'%.1f'))
2715 if len(EPS)==0:
2716 res_str_i += "= Number of Exceptional PS points : 0\n"
2717 if len(EPS)>0:
2718 res_str_i = "\nDetails of the %d/%d EPS encountered by %s\n"\
2719 %(len(EPS),nPS,toolname)
2720 res_str_i += "|= DP Median accuracy............ %s\n"\
2721 %f(median(EPS_stability_DP),'%.2e')
2722 res_str_i += "|= DP Max accuracy............... %s\n"\
2723 %f(min(EPS_stability_DP),'%.2e')
2724 res_str_i += "|= DP Min accuracy............... %s\n"\
2725 %f(max(EPS_stability_DP),'%.2e')
2726 pmed,pmin,pfrac=loop_direction_test_power(\
2727 [stab['DP_stability'][E[0]] for E in EPS])
2728 res_str_i += "|= EPS DP loop_dir test power.... %s,%s\n"\
2729 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2730 res_str_i += "|= EPS DP fraction with power<-3. %s\n"\
2731 %f(pfrac,'%.2e')
2732 res_str_i += "|= QP Median accuracy............ %s\n"\
2733 %f(median(EPS_stability_QP),'%.2e')
2734 res_str_i += "|= QP Max accuracy............... %s\n"\
2735 %f(min(EPS_stability_QP),'%.2e')
2736 res_str_i += "|= QP Min accuracy............... %s\n"\
2737 %f(max(EPS_stability_QP),'%.2e')
2738 pmed,pmin,pfrac=loop_direction_test_power(\
2739 [stab['QP_stability'][E[0]] for E in EPS])
2740 res_str_i += "|= EPS QP loop_dir test power.... %s,%s\n"\
2741 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2742 res_str_i += "|= EPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2743
2744 logFile.write(res_str_i)
2745
2746 if len(EPS)>0:
2747 logFile.write('\nFull details of the %i EPS encountered by %s.\n'\
2748 %(len(EPS),toolname))
2749 logFile.write('<EPS_data reduction=%s>\n'%xml_toolname.upper())
2750 for i, eps in enumerate(EPS):
2751 logFile.write('\nEPS #%i\n'%(i+1))
2752 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2753 for p in eps[1]]))
2754 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[eps[0]])
2755 logFile.write(' QP accuracy : %.4e\n'%QP_stability[eps[0]])
2756 logFile.write('</EPS_data>\n')
2757 if len(UPS)>0:
2758 logFile.write('\nFull details of the %i UPS encountered by %s.\n'\
2759 %(len(UPS),toolname))
2760 logFile.write('<UPS_data reduction=%s>\n'%xml_toolname.upper())
2761 for i, ups in enumerate(UPS):
2762 logFile.write('\nUPS #%i\n'%(i+1))
2763 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2764 for p in ups[1]]))
2765 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[ups[0]])
2766 logFile.write(' QP accuracy : %.4e\n'%QP_stability[ups[0]])
2767 logFile.write('</UPS_data>\n')
2768
2769 logFile.write('\nData entries for the stability plot.\n')
2770 logFile.write('First row is a maximal accuracy delta, second is the '+\
2771 'fraction of events with DP accuracy worse than delta.\n')
2772 logFile.write('<plot_data reduction=%s>\n'%xml_toolname.upper())
2773
2774 if max(DP_stability)>0.0:
2775 min_digit_acc=int(math.log(max(DP_stability))/math.log(10))
2776 if min_digit_acc>=0:
2777 min_digit_acc = min_digit_acc+1
2778 accuracies=[10**(-17+(i/5.0)) for i in range(5*(17+min_digit_acc)+1)]
2779 else:
2780 logFile.writelines('%.4e %.4e\n'%(accuracies[i], 0.0) for i in \
2781 range(len(accuracies)))
2782 logFile.write('</plot_data>\n')
2783 res_str_i += '\nPerfect accuracy over all the trial PS points. No plot'+\
2784 ' is output in this case.'
2785 logFile.write('Perfect accuracy over all the trial PS points.')
2786 res_str +=res_str_i
2787 continue
2788
2789 accuracy_dict[toolname]=accuracies
2790 if max(accuracies) > max_acc: max_acc=max(accuracies)
2791 if min(accuracies) < min_acc: min_acc=min(accuracies)
2792 data_plot=[]
2793 for acc in accuracies:
2794 data_plot.append(float(len([d for d in DP_stability if d>acc]))\
2795 /float(len(DP_stability)))
2796 data_plot_dict[toolname]=data_plot
2797
2798 logFile.writelines('%.4e %.4e\n'%(accuracies[i], data_plot[i]) for i in \
2799 range(len(accuracies)))
2800 logFile.write('</plot_data>\n')
2801 logFile.write('\nList of accuracies recorded for the %i evaluations with %s\n'\
2802 %(nPS,toolname))
2803 logFile.write('First row is DP, second is QP (if available).\n\n')
2804 logFile.write('<accuracies reduction=%s>\n'%xml_toolname.upper())
2805 logFile.writelines('%.4e '%DP_stability[i]+('NA\n' if QP_stability[i]==-1.0 \
2806 else '%.4e\n'%QP_stability[i]) for i in range(nPS))
2807 logFile.write('</accuracies>\n')
2808 res_str+=res_str_i
2809 logFile.close()
2810 res_str += "\n= Stability details of the run are output to the file"+\
2811 " stability_%s_%s.log\n"%(mode,process.shell_string())
2812
2813
2814
2815
2816 if any(isinstance(handler,logging.FileHandler) for handler in \
2817 logging.getLogger('madgraph').handlers):
2818 return res_str
2819
2820 try:
2821 import matplotlib.pyplot as plt
2822 colorlist=['b','r','g','y','m','c']
2823 for i,key in enumerate(data_plot_dict.keys()):
2824 color=colorlist[i]
2825 data_plot=data_plot_dict[key]
2826 accuracies=accuracy_dict[key]
2827 plt.plot(accuracies, data_plot, color=color, marker='', linestyle='-',\
2828 label=key)
2829 plt.axis([min_acc,max_acc,\
2830 10**(-int(math.log(nPSmax-0.5)/math.log(10))-1), 1])
2831 plt.yscale('log')
2832 plt.xscale('log')
2833 plt.title('Stability plot for %s (%s mode, %d points)'%\
2834 (process.nice_string()[9:],mode,nPSmax))
2835 plt.ylabel('Fraction of events')
2836 plt.xlabel('Maximal precision')
2837 plt.legend()
2838 if not reusing:
2839 logger.info('Some stability statistics will be displayed once you '+\
2840 'close the plot window')
2841 plt.show()
2842 else:
2843 fig_output_file = str(pjoin(output_path,
2844 'stability_plot_%s_%s.png'%(mode,process.shell_string())))
2845 logger.info('Stability plot output to file %s. '%fig_output_file)
2846 plt.savefig(fig_output_file)
2847 return res_str
2848 except Exception as e:
2849 if isinstance(e, ImportError):
2850 res_str += "\n= Install matplotlib to get a "+\
2851 "graphical display of the results of this check."
2852 else:
2853 res_str += "\n= Could not produce the stability plot because of "+\
2854 "the following error: %s"%str(e)
2855 return res_str
2856
2858 """Present the result of a timings check in a nice format """
2859
2860
2861 f = format_output
2862 loop_optimized_output = timings['loop_optimized_output']
2863
2864 res_str = "%s \n"%process.nice_string()
2865 try:
2866 gen_total = timings['HELAS_MODEL_compilation']+\
2867 timings['HelasDiagrams_generation']+\
2868 timings['Process_output']+\
2869 timings['Diagrams_generation']+\
2870 timings['Process_compilation']+\
2871 timings['Initialization']
2872 except TypeError:
2873 gen_total = None
2874 res_str += "\n= Generation time total...... ========== %s\n"%f(gen_total,'%.3gs')
2875 res_str += "|= Diagrams generation....... %s\n"\
2876 %f(timings['Diagrams_generation'],'%.3gs')
2877 res_str += "|= Helas Diagrams generation. %s\n"\
2878 %f(timings['HelasDiagrams_generation'],'%.3gs')
2879 res_str += "|= Process output............ %s\n"\
2880 %f(timings['Process_output'],'%.3gs')
2881 res_str += "|= HELAS+model compilation... %s\n"\
2882 %f(timings['HELAS_MODEL_compilation'],'%.3gs')
2883 res_str += "|= Process compilation....... %s\n"\
2884 %f(timings['Process_compilation'],'%.3gs')
2885 res_str += "|= Initialization............ %s\n"\
2886 %f(timings['Initialization'],'%.3gs')
2887
2888 res_str += "\n= Helicity sum time / PSpoint ========== %.3gms\n"\
2889 %(timings['run_unpolarized_total']*1000.0)
2890 if loop_optimized_output:
2891 coef_time=timings['run_unpolarized_coefs']*1000.0
2892 loop_time=(timings['run_unpolarized_total']-\
2893 timings['run_unpolarized_coefs'])*1000.0
2894 total=coef_time+loop_time
2895 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2896 %(coef_time,int(round(100.0*coef_time/total)))
2897 res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
2898 %(loop_time,int(round(100.0*loop_time/total)))
2899 res_str += "\n= One helicity time / PSpoint ========== %.3gms\n"\
2900 %(timings['run_polarized_total']*1000.0)
2901 if loop_optimized_output:
2902 coef_time=timings['run_polarized_coefs']*1000.0
2903 loop_time=(timings['run_polarized_total']-\
2904 timings['run_polarized_coefs'])*1000.0
2905 total=coef_time+loop_time
2906 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2907 %(coef_time,int(round(100.0*coef_time/total)))
2908 res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
2909 %(loop_time,int(round(100.0*loop_time/total)))
2910 res_str += "\n= Miscellaneous ========================\n"
2911 res_str += "|= Number of hel. computed... %s/%s\n"\
2912 %(f(timings['n_contrib_hel'],'%d'),f(timings['n_tot_hel'],'%d'))
2913 res_str += "|= Number of loop diagrams... %s\n"%f(timings['n_loops'],'%d')
2914 if loop_optimized_output:
2915 res_str += "|= Number of loop groups..... %s\n"\
2916 %f(timings['n_loop_groups'],'%d')
2917 res_str += "|= Number of loop wfs........ %s\n"\
2918 %f(timings['n_loop_wfs'],'%d')
2919 if timings['loop_wfs_ranks']!=None:
2920 for i, r in enumerate(timings['loop_wfs_ranks']):
2921 res_str += "||= # of loop wfs of rank %d.. %d\n"%(i,r)
2922 res_str += "|= Loading time (Color data). ~%.3gms\n"\
2923 %(timings['Booting_time']*1000.0)
2924 res_str += "|= Maximum RAM usage (rss)... %s\n"\
2925 %f(float(timings['ram_usage']/1000.0),'%.3gMb')
2926 res_str += "\n= Output disk size =====================\n"
2927 res_str += "|= Source directory sources.. %s\n"%f(timings['du_source'],'%sb')
2928 res_str += "|= Process sources........... %s\n"%f(timings['du_process'],'%sb')
2929 res_str += "|= Color and helicity data... %s\n"%f(timings['du_color'],'%sb')
2930 res_str += "|= Executable size........... %s\n"%f(timings['du_exe'],'%sb')
2931
2932 return res_str
2933
2935 """Present the results of a comparison in a nice list format
2936 If mode is 'short', return the number of failed processes.
2937 """
2938 proc_col_size = 17
2939 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
2940 if pert_coupl:
2941 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
2942 else:
2943 process_header = "Process"
2944
2945 if len(process_header) + 1 > proc_col_size:
2946 proc_col_size = len(process_header) + 1
2947
2948 for proc in comparison_results:
2949 if len(proc['process'].base_string()) + 1 > proc_col_size:
2950 proc_col_size = len(proc['process'].base_string()) + 1
2951
2952 col_size = 18
2953
2954 pass_proc = 0
2955 fail_proc = 0
2956 no_check_proc = 0
2957
2958 failed_proc_list = []
2959 no_check_proc_list = []
2960
2961 res_str = fixed_string_length(process_header, proc_col_size) + \
2962 fixed_string_length("Min element", col_size) + \
2963 fixed_string_length("Max element", col_size) + \
2964 fixed_string_length("Relative diff.", col_size) + \
2965 "Result"
2966
2967 for result in comparison_results:
2968 proc = result['process'].base_string()
2969 values = result['values']
2970
2971 if len(values) <= 1:
2972 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
2973 " * No permutations, process not checked *"
2974 no_check_proc += 1
2975 no_check_proc_list.append(result['process'].nice_string())
2976 continue
2977
2978 passed = result['passed']
2979
2980 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
2981 fixed_string_length("%1.10e" % min(values), col_size) + \
2982 fixed_string_length("%1.10e" % max(values), col_size) + \
2983 fixed_string_length("%1.10e" % result['difference'],
2984 col_size)
2985 if passed:
2986 pass_proc += 1
2987 res_str += "Passed"
2988 else:
2989 fail_proc += 1
2990 failed_proc_list.append(result['process'].nice_string())
2991 res_str += "Failed"
2992
2993 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
2994 (pass_proc, pass_proc + fail_proc,
2995 fail_proc, pass_proc + fail_proc)
2996
2997 if fail_proc != 0:
2998 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
2999 if no_check_proc != 0:
3000 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
3001
3002 return res_str
3003
3005 """Helper function to fix the length of a string by cutting it
3006 or adding extra space."""
3007
3008 if len(mystr) > length:
3009 return mystr[0:length]
3010 else:
3011 return mystr + " " * (length - len(mystr))
3012
3013
3014
3015
3016
3017 -def check_gauge(processes, param_card = None,cuttools="", tir={}, reuse = False,
3018 options=None, output_path=None, cmd = FakeInterface()):
3019 """Check gauge invariance of the processes by using the BRS check.
3020 For one of the massless external bosons (e.g. gluon or photon),
3021 replace the polarization vector (epsilon_mu) with its momentum (p_mu)
3022 """
3023 cmass_scheme = cmd.options['complex_mass_scheme']
3024 if isinstance(processes, base_objects.ProcessDefinition):
3025
3026
3027 multiprocess = processes
3028
3029 model = multiprocess.get('model')
3030
3031 if multiprocess.get('perturbation_couplings')==[]:
3032 evaluator = MatrixElementEvaluator(model, param_card,cmd= cmd,
3033 auth_skipping = True, reuse = False)
3034 else:
3035 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3036 cmd=cmd,model=model, param_card=param_card,
3037 auth_skipping = False, reuse = False,
3038 output_path=output_path)
3039
3040 if not cmass_scheme and multiprocess.get('perturbation_couplings')==[]:
3041
3042 logger.info('Set All width to zero for non complex mass scheme checks')
3043 for particle in evaluator.full_model.get('particles'):
3044 if particle.get('width') != 'ZERO':
3045 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3046 results = run_multiprocs_no_crossings(check_gauge_process,
3047 multiprocess,
3048 evaluator,
3049 options=options
3050 )
3051
3052 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3053
3054 clean_up(output_path)
3055
3056 return results
3057
3058 elif isinstance(processes, base_objects.Process):
3059 processes = base_objects.ProcessList([processes])
3060 elif isinstance(processes, base_objects.ProcessList):
3061 pass
3062 else:
3063 raise InvalidCmd("processes is of non-supported format")
3064
3065 assert processes, "No processes given"
3066
3067 model = processes[0].get('model')
3068
3069
3070 if processes[0].get('perturbation_couplings')==[]:
3071 evaluator = MatrixElementEvaluator(model, param_card,
3072 auth_skipping = True, reuse = False,
3073 cmd = cmd)
3074 else:
3075 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3076 model=model, param_card=param_card,
3077 auth_skipping = False, reuse = False,
3078 output_path=output_path, cmd = cmd)
3079 comparison_results = []
3080 comparison_explicit_flip = []
3081
3082
3083 for process in processes:
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093 result = check_gauge_process(process, evaluator,options=options)
3094 if result:
3095 comparison_results.append(result)
3096
3097 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3098
3099 clean_up(output_path)
3100
3101 return comparison_results
3102
3105 """Check gauge invariance for the process, unless it is already done."""
3106
3107 model = process.get('model')
3108
3109
3110 found_gauge = False
3111 for i, leg in enumerate(process.get('legs')):
3112 part = model.get_particle(leg.get('id'))
3113 if part.get('spin') == 3 and part.get('mass').lower() == 'zero':
3114 found_gauge = True
3115 break
3116 if not found_gauge:
3117 logger.info("No ward identity for %s" % \
3118 process.nice_string().replace('Process', 'process'))
3119
3120 return None
3121
3122 for i, leg in enumerate(process.get('legs')):
3123 leg.set('number', i+1)
3124
3125 logger.info("Checking ward identities for %s" % \
3126 process.nice_string().replace('Process', 'process'))
3127
3128 legs = process.get('legs')
3129
3130
3131 try:
3132 if process.get('perturbation_couplings')==[]:
3133 amplitude = diagram_generation.Amplitude(process)
3134 else:
3135 amplitude = loop_diagram_generation.LoopAmplitude(process)
3136 except InvalidCmd:
3137 logging.info("No diagrams for %s" % \
3138 process.nice_string().replace('Process', 'process'))
3139 return None
3140 if not amplitude.get('diagrams'):
3141
3142 logging.info("No diagrams for %s" % \
3143 process.nice_string().replace('Process', 'process'))
3144 return None
3145
3146 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3147 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3148 gen_color = False)
3149 else:
3150 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3151 optimized_output=evaluator.loop_optimized_output)
3152
3153
3154
3155
3156
3157
3158
3159
3160 brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
3161 output='jamp', options=options)
3162
3163 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3164 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3165 gen_color = False)
3166
3167 mvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = False,
3168 output='jamp', options=options)
3169
3170 if mvalue and mvalue['m2']:
3171 return {'process':process,'value':mvalue,'brs':brsvalue}
3172
3174 """Present the results of a comparison in a nice list format"""
3175
3176 proc_col_size = 17
3177
3178 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
3179
3180
3181 if pert_coupl:
3182 threshold=1e-5
3183 else:
3184 threshold=1e-10
3185
3186 if pert_coupl:
3187 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
3188 else:
3189 process_header = "Process"
3190
3191 if len(process_header) + 1 > proc_col_size:
3192 proc_col_size = len(process_header) + 1
3193
3194 for one_comp in comparison_results:
3195 proc = one_comp['process'].base_string()
3196 mvalue = one_comp['value']
3197 brsvalue = one_comp['brs']
3198 if len(proc) + 1 > proc_col_size:
3199 proc_col_size = len(proc) + 1
3200
3201 col_size = 18
3202
3203 pass_proc = 0
3204 fail_proc = 0
3205
3206 failed_proc_list = []
3207 no_check_proc_list = []
3208
3209 res_str = fixed_string_length(process_header, proc_col_size) + \
3210 fixed_string_length("matrix", col_size) + \
3211 fixed_string_length("BRS", col_size) + \
3212 fixed_string_length("ratio", col_size) + \
3213 "Result"
3214
3215 for one_comp in comparison_results:
3216 proc = one_comp['process'].base_string()
3217 mvalue = one_comp['value']
3218 brsvalue = one_comp['brs']
3219 ratio = (abs(brsvalue['m2'])/abs(mvalue['m2']))
3220 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3221 fixed_string_length("%1.10e" % mvalue['m2'], col_size)+ \
3222 fixed_string_length("%1.10e" % brsvalue['m2'], col_size)+ \
3223 fixed_string_length("%1.10e" % ratio, col_size)
3224
3225 if ratio > threshold:
3226 fail_proc += 1
3227 proc_succeed = False
3228 failed_proc_list.append(proc)
3229 res_str += "Failed"
3230 else:
3231 pass_proc += 1
3232 proc_succeed = True
3233 res_str += "Passed"
3234
3235
3236
3237
3238
3239 if len(mvalue['jamp'])!=0:
3240 for k in range(len(mvalue['jamp'][0])):
3241 m_sum = 0
3242 brs_sum = 0
3243
3244 for j in range(len(mvalue['jamp'])):
3245
3246 m_sum += abs(mvalue['jamp'][j][k])**2
3247 brs_sum += abs(brsvalue['jamp'][j][k])**2
3248
3249
3250 if not m_sum:
3251 continue
3252 ratio = abs(brs_sum) / abs(m_sum)
3253
3254 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
3255 fixed_string_length("%1.10e" % m_sum, col_size) + \
3256 fixed_string_length("%1.10e" % brs_sum, col_size) + \
3257 fixed_string_length("%1.10e" % ratio, col_size)
3258
3259 if ratio > 1e-15:
3260 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
3261 fail_proc += 1
3262 pass_proc -= 1
3263 failed_proc_list.append(proc)
3264 res_str += tmp_str + "Failed"
3265 elif not proc_succeed:
3266 res_str += tmp_str + "Passed"
3267
3268
3269 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3270 (pass_proc, pass_proc + fail_proc,
3271 fail_proc, pass_proc + fail_proc)
3272
3273 if fail_proc != 0:
3274 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3275
3276 if output=='text':
3277 return res_str
3278 else:
3279 return fail_proc
3280
3281
3282
3283 -def check_lorentz(processes, param_card = None,cuttools="", tir={}, options=None, \
3284 reuse = False, output_path=None, cmd = FakeInterface()):
3285 """ Check if the square matrix element (sum over helicity) is lorentz
3286 invariant by boosting the momenta with different value."""
3287
3288 cmass_scheme = cmd.options['complex_mass_scheme']
3289 if isinstance(processes, base_objects.ProcessDefinition):
3290
3291
3292 multiprocess = processes
3293 model = multiprocess.get('model')
3294
3295 if multiprocess.get('perturbation_couplings')==[]:
3296 evaluator = MatrixElementEvaluator(model,
3297 cmd= cmd, auth_skipping = False, reuse = True)
3298 else:
3299 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3300 model=model, auth_skipping = False, reuse = True,
3301 output_path=output_path, cmd = cmd)
3302
3303 if not cmass_scheme and processes.get('perturbation_couplings')==[]:
3304
3305 logger.info('Set All width to zero for non complex mass scheme checks')
3306 for particle in evaluator.full_model.get('particles'):
3307 if particle.get('width') != 'ZERO':
3308 evaluator.full_model.get('parameter_dict')[\
3309 particle.get('width')] = 0.
3310
3311 results = run_multiprocs_no_crossings(check_lorentz_process,
3312 multiprocess,
3313 evaluator,
3314 options=options)
3315
3316 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3317
3318 clean_up(output_path)
3319
3320 return results
3321
3322 elif isinstance(processes, base_objects.Process):
3323 processes = base_objects.ProcessList([processes])
3324 elif isinstance(processes, base_objects.ProcessList):
3325 pass
3326 else:
3327 raise InvalidCmd("processes is of non-supported format")
3328
3329 assert processes, "No processes given"
3330
3331 model = processes[0].get('model')
3332
3333
3334 if processes[0].get('perturbation_couplings')==[]:
3335 evaluator = MatrixElementEvaluator(model, param_card,
3336 auth_skipping = False, reuse = True,
3337 cmd=cmd)
3338 else:
3339 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
3340 model=model,param_card=param_card,
3341 auth_skipping = False, reuse = True,
3342 output_path=output_path, cmd = cmd)
3343
3344 comparison_results = []
3345
3346
3347 for process in processes:
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357 result = check_lorentz_process(process, evaluator,options=options)
3358 if result:
3359 comparison_results.append(result)
3360
3361 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3362
3363 clean_up(output_path)
3364
3365 return comparison_results
3366
3369 """Check gauge invariance for the process, unless it is already done."""
3370
3371 amp_results = []
3372 model = process.get('model')
3373
3374 for i, leg in enumerate(process.get('legs')):
3375 leg.set('number', i+1)
3376
3377 logger.info("Checking lorentz transformations for %s" % \
3378 process.nice_string().replace('Process:', 'process'))
3379
3380 legs = process.get('legs')
3381
3382
3383 try:
3384 if process.get('perturbation_couplings')==[]:
3385 amplitude = diagram_generation.Amplitude(process)
3386 else:
3387 amplitude = loop_diagram_generation.LoopAmplitude(process)
3388 except InvalidCmd:
3389 logging.info("No diagrams for %s" % \
3390 process.nice_string().replace('Process', 'process'))
3391 return None
3392
3393 if not amplitude.get('diagrams'):
3394
3395 logging.info("No diagrams for %s" % \
3396 process.nice_string().replace('Process', 'process'))
3397 return None
3398
3399
3400 p, w_rambo = evaluator.get_momenta(process, options)
3401
3402
3403 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3404 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3405 gen_color = True)
3406 else:
3407 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3408 optimized_output = evaluator.loop_optimized_output)
3409
3410 MLOptions = {'ImprovePS':True,'ForceMP':True}
3411 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3412 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3413 auth_skipping = True, options=options)
3414 else:
3415 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3416 auth_skipping = True, PS_name = 'original', MLOptions=MLOptions,
3417 options = options)
3418
3419 if data and data['m2']:
3420 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3421 results = [data]
3422 else:
3423 results = [('Original evaluation',data)]
3424 else:
3425 return {'process':process, 'results':'pass'}
3426
3427
3428
3429
3430 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3431 for boost in range(1,4):
3432 boost_p = boost_momenta(p, boost)
3433 results.append(evaluator.evaluate_matrix_element(matrix_element,
3434 p=boost_p,output='jamp'))
3435 else:
3436
3437 boost_p = boost_momenta(p, 3)
3438 results.append(('Z-axis boost',
3439 evaluator.evaluate_matrix_element(matrix_element, options=options,
3440 p=boost_p, PS_name='zBoost', output='jamp',MLOptions = MLOptions)))
3441
3442
3443
3444
3445 if not options['events']:
3446 boost_p = boost_momenta(p, 1)
3447 results.append(('X-axis boost',
3448 evaluator.evaluate_matrix_element(matrix_element, options=options,
3449 p=boost_p, PS_name='xBoost', output='jamp',MLOptions = MLOptions)))
3450 boost_p = boost_momenta(p, 2)
3451 results.append(('Y-axis boost',
3452 evaluator.evaluate_matrix_element(matrix_element,options=options,
3453 p=boost_p, PS_name='yBoost', output='jamp',MLOptions = MLOptions)))
3454
3455
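    # The two checks below rotate the spatial momenta about the z axis:
    # (x,y) -> (-y,x) is a rotation by pi/2 and (x,y) -> ((x-y)/sqrt(2),(x+y)/sqrt(2))
    # a rotation by pi/4, under which the summed squared matrix element must be
    # invariant.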
3456 rot_p = [[pm[0],-pm[2],pm[1],pm[3]] for pm in p]
3457 results.append(('Z-axis pi/2 rotation',
3458 evaluator.evaluate_matrix_element(matrix_element,options=options,
3459 p=rot_p, PS_name='Rotation1', output='jamp',MLOptions = MLOptions)))
3460
3461 sq2 = math.sqrt(2.0)
3462 rot_p = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in p]
3463 results.append(('Z-axis pi/4 rotation',
3464 evaluator.evaluate_matrix_element(matrix_element,options=options,
3465 p=rot_p, PS_name='Rotation2', output='jamp',MLOptions = MLOptions)))
3466
3467
3468 return {'process': process, 'results': results}
3469
3470
3471
3472
3473 -def check_unitary_feynman(processes_unit, processes_feynm, param_card=None,
3474 options=None, tir={}, output_path=None,
3475 cuttools="", reuse=False, cmd = FakeInterface()):
3476 """Check gauge invariance of the processes by flipping
3477 the gauge of the model
3478 """
3479
3480 mg_root = cmd._mgme_dir
3481
3482 cmass_scheme = cmd.options['complex_mass_scheme']
3483
3484 if isinstance(processes_unit, base_objects.ProcessDefinition):
3485
3486
3487 multiprocess_unit = processes_unit
3488 model = multiprocess_unit.get('model')
3489
3490
3491
3492 loop_optimized_bu = cmd.options['loop_optimized_output']
3493 if processes_unit.get('squared_orders'):
3494 if processes_unit.get('perturbation_couplings') in [[],['QCD']]:
3495 cmd.options['loop_optimized_output'] = True
3496 else:
3497 raise InvalidCmd("The gauge test cannot be performed for"+
3498 " a process with corrections beyond QCD which also"+
3499 " specifies squared-order constraints.")
3500 else:
3501 cmd.options['loop_optimized_output'] = False
3502
3503 aloha.unitary_gauge = True
3504 if processes_unit.get('perturbation_couplings')==[]:
3505 evaluator = MatrixElementEvaluator(model, param_card,
3506 cmd=cmd,auth_skipping = False, reuse = True)
3507 else:
3508 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3509 cmd=cmd, model=model,
3510 param_card=param_card,
3511 auth_skipping = False,
3512 output_path=output_path,
3513 reuse = False)
3514 if not cmass_scheme and multiprocess_unit.get('perturbation_couplings')==[]:
3515 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3516 for particle in evaluator.full_model.get('particles'):
3517 if particle.get('width') != 'ZERO':
3518 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3519
3520 output_u = run_multiprocs_no_crossings(get_value,
3521 multiprocess_unit,
3522 evaluator,
3523 options=options)
3524
3525 clean_added_globals(ADDED_GLOBAL)
3526
3527 if processes_unit.get('perturbation_couplings')!=[]:
3528 clean_up(output_path)
3529
3530 momentum = {}
3531 for data in output_u:
3532 momentum[data['process']] = data['p']
3533
3534 multiprocess_feynm = processes_feynm
3535 model = multiprocess_feynm.get('model')
3536
3537
3538 aloha.unitary_gauge = False
3539
3540
3541 cmd.options['loop_optimized_output'] = True
3542 if processes_feynm.get('perturbation_couplings')==[]:
3543 evaluator = MatrixElementEvaluator(model, param_card,
3544 cmd= cmd, auth_skipping = False, reuse = False)
3545 else:
3546 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3547 cmd= cmd, model=model,
3548 param_card=param_card,
3549 auth_skipping = False,
3550 output_path=output_path,
3551 reuse = False)
3552
3553 if not cmass_scheme and multiprocess_feynm.get('perturbation_couplings')==[]:
3554
3555 for particle in evaluator.full_model.get('particles'):
3556 if particle.get('width') != 'ZERO':
3557 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3558
3559 output_f = run_multiprocs_no_crossings(get_value, multiprocess_feynm,
3560 evaluator, momentum,
3561 options=options)
3562 output = [processes_unit]
3563 for data in output_f:
3564 local_dico = {}
3565 local_dico['process'] = data['process']
3566 local_dico['value_feynm'] = data['value']
3567 local_dico['value_unit'] = [d['value'] for d in output_u
3568 if d['process'] == data['process']][0]
3569 output.append(local_dico)
3570
3571 if processes_feynm.get('perturbation_couplings')!=[] and not reuse:
3572
3573 clean_up(output_path)
3574
3575
3576 cmd.options['loop_optimized_output'] = loop_optimized_bu
3577
3578 return output
3579
3580
3581
3582
3583 else:
3584 raise InvalidCmd("The process definition has an unsupported format")
3585
3591 """Check complex mass scheme consistency in the offshell region of s-channels
3592 detected for this process, by varying the expansion paramer consistently
3593 with the corresponding width and making sure that the difference between
3594 the complex mass-scheme and the narrow-width approximation is higher order.
3595 """
3596
3597 if not isinstance(process_line, str):
3598 raise InvalidCmd("The process definition must be given as a string for this check")
3599
3600
3601 cmd.do_set('complex_mass_scheme False', log=False)
3602
3603 multiprocess_nwa = cmd.extract_process(process_line)
3604
3605
3606 has_FRdecay = os.path.isfile(pjoin(cmd._curr_model.get('modelpath'),
3607 'decays.py'))
3608
3609
3610 missing_perturbations = cmd._curr_model.get_coupling_orders()-\
3611 set(multiprocess_nwa.get('perturbation_couplings'))
3612
3613 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3614 len(missing_perturbations)>0:
3615 logger.warning("------------------------------------------------------")
3616 logger.warning("The process considered does not specify the following "+
3617 "types of loops to be included: %s"%str(list(missing_perturbations)))
3618 logger.warning("Consequently, the CMS check will be unsuccessful if the"+
3619 " process involves any resonating particle whose LO decay is "+
3620 "mediated by one of these orders.")
3621 logger.warning("You can use the syntax '[virt=all]' to automatically"+
3622 " include all loops supported by the model.")
3623 logger.warning("------------------------------------------------------")
3624
3625 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3626 len(multiprocess_nwa.get('legs'))<=4:
3627 logger.warning("------------------------------------------------------")
3628 logger.warning("Processes with four or fewer external states are typically not"+\
3629 " sensitive to incorrect Complex Mass Scheme implementations.")
3630 logger.warning("You can test this sensitivity by making sure that the"+
3631 " same check on the leading-order counterpart of this process *fails*"+
3632 " when using the option '--diff_lambda_power=2'.")
3633 logger.warning("If it does not, then consider adding a massless "+
3634 "gauge vector to the external states.")
3635 logger.warning("------------------------------------------------------")
3636
3637 if options['recompute_width']=='auto':
3638 if multiprocess_nwa.get('perturbation_couplings')!=[]:
3639
3640 options['recompute_width'] = 'first_time'
3641 else:
3642 options['recompute_width'] = 'never'
3643
3644
3645 if options['recompute_width'] in ['first_time', 'always'] and \
3646 not has_FRdecay and not 'cached_widths' in options:
3647 logger.info('The LO widths will need to be recomputed but the '+
3648 'model considered does not appear to have a decay module.\nThe widths'+
3649 ' will need to be computed numerically, which will slow down the test.\n'+
3650 'Consider using a param_card already specifying correct LO widths and'+
3651 " adding the option --recompute_width=never when doing this check.")
3652
3653 if options['recompute_width']=='never' and \
3654 any(order in multiprocess_nwa.get('perturbation_couplings') for order in
3655 options['expansion_orders']):
3656 logger.warning('You chose not to recompute the widths while including'+
3657 ' loop corrections. The check will be successful only if the widths'+\
3658 ' specified in the default param_card are LO-accurate (remember that'+\
3659 ' the default values of alpha_s and awem1 are 0.1 and 10.0'+\
3660 ' respectively).')
3661
3662
3663
3664
3665
3666 if options['recompute_width'] in ['first_time', 'always'] and has_FRdecay:
3667 modelname = cmd._curr_model.get('modelpath+restriction')
3668 with misc.MuteLogger(['madgraph'], ['INFO']):
3669 model = import_ufo.import_model(modelname, decay=True,
3670 complex_mass_scheme=False)
3671 multiprocess_nwa.set('model', model)
3672
3673 run_options = copy.deepcopy(options)
3674
3675
3676 if options['seed'] > 0:
3677 random.seed(options['seed'])
3678
3679
3680 run_options['param_card'] = param_card
3681 if isinstance(cmd, FakeInterface):
3682 raise MadGraph5Error, "Check CMS cannot be run with a FakeInterface."
3683 run_options['cmd'] = cmd
3684 run_options['MLOptions'] = MLOptions
3685 if output_path:
3686 run_options['output_path'] = output_path
3687 else:
3688 run_options['output_path'] = cmd._mgme_dir
3689
3690
3691 run_options['has_FRdecay'] = has_FRdecay
3692
3693
3694 if 'cached_widths' not in run_options:
3695 run_options['cached_widths'] = {}
3696
3697
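# Cache of the param_card (and its name-to-block mapping) used for each of the
# two modes, so that it only has to be parsed once per NWA/CMS run.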
3698 run_options['cached_param_card'] = {'NWA':[None,None],'CMS':[None,None]}
3699
3700 if options['tweak']['name']:
3701 logger.info("Now running the CMS check for tweak '%s'"\
3702 %options['tweak']['name'])
3703
3704 model = multiprocess_nwa.get('model')
3705
3706 for particle in model.get('particles'):
3707 mass_param = model.get_parameter(particle.get('mass'))
3708 if particle.get('mass')!='ZERO' and 'external' not in mass_param.depend:
3709 if model.get('name') not in ['sm','loop_sm']:
3710 logger.warning("The mass '%s' of particle '%s' is not an external"%\
3711 (model.get_parameter(particle.get('mass')).name,particle.get('name'))+\
3712 " parameter as required by this check. \nMG5_aMC will try to"+\
3713 " modify the model to remedy the situation. No guarantee.")
3714 status = model.change_electroweak_mode(set(['mz','mw','alpha']))
3715 if not status:
3716 raise InvalidCmd('The EW scheme could apparently not be changed'+\
3717 ' so as to have the W-boson mass external. The check cannot'+\
3718 ' proceed.')
3719 break
3720
3721 veto_orders = [order for order in model.get('coupling_orders') if \
3722 order not in options['expansion_orders']]
3723 if len(veto_orders)>0:
3724 logger.warning('You did not define any parameter scaling rule for the'+\
3725 " coupling orders %s. They will be "%','.join(veto_orders)+\
3726 "forced to zero in the tests. Consider adding a scaling rule to"+\
3727 " avoid this (see option '--cms' in 'help check').")
3728 for order in veto_orders:
3729 multiprocess_nwa.get('orders')[order] = 0
3730 multiprocess_nwa.set('perturbation_couplings', [order for order in
3731 multiprocess_nwa['perturbation_couplings'] if order not in veto_orders])
3732
3733 if multiprocess_nwa.get('perturbation_couplings')==[]:
3734 evaluator = MatrixElementEvaluator(model, param_card,
3735 cmd=cmd,auth_skipping = False, reuse = True)
3736 else:
3737 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3738 cmd=cmd, model=model,
3739 param_card=param_card,
3740 auth_skipping = False,
3741 output_path=output_path,
3742 reuse = False)
3743
3744 cached_information = []
3745 output_nwa = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3746 multiprocess_nwa,
3747 evaluator,
3748
3749
3750
3751
3752
3753 opt = cached_information,
3754 options=run_options)
3755
3756
3757 clean_added_globals(ADDED_GLOBAL)
3758
3759
3760 cmd.do_set('complex_mass_scheme True', log=False)
3761
3762
3763 multiprocess_cms = cmd.extract_process(process_line)
3764 model = multiprocess_cms.get('model')
3765
3766 if len(veto_orders)>0:
3767 for order in veto_orders:
3768 multiprocess_cms.get('orders')[order] = 0
3769 multiprocess_cms.set('perturbation_couplings', [order for order in
3770 multiprocess_cms['perturbation_couplings'] if order not in veto_orders])
3771
3772 if multiprocess_cms.get('perturbation_couplings')==[]:
3773 evaluator = MatrixElementEvaluator(model, param_card,
3774 cmd=cmd,auth_skipping = False, reuse = True)
3775 else:
3776 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3777 cmd=cmd, model=model,
3778 param_card=param_card,
3779 auth_skipping = False,
3780 output_path=output_path,
3781 reuse = False)
3782
3783 output_cms = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3784 multiprocess_cms,
3785 evaluator,
3786
3787 opt = dict(cached_information),
3788 options=run_options)
3789
3790 if multiprocess_cms.get('perturbation_couplings')!=[] and not options['reuse']:
3791
3792 clean_up(output_path)
3793
3794
3795
3796
3797 result = {'ordered_processes':[],'lambdaCMS':options['lambdaCMS']}
3798
3799 result['perturbation_orders']=multiprocess_nwa.get('perturbation_couplings')
3800 for i, proc_res in enumerate(output_nwa):
3801 result['ordered_processes'].append(proc_res[0])
3802 result[proc_res[0]] = {
3803 'NWA':proc_res[1]['resonances_result'],
3804 'CMS':output_cms[i][1]['resonances_result'],
3805 'born_order':proc_res[1]['born_order'],
3806 'loop_order':proc_res[1]['loop_order']}
3807
3808
3809
3810 options['cached_widths'] = run_options['cached_widths']
3811
3812
3813 result['recompute_width'] = options['recompute_width']
3814 result['has_FRdecay'] = has_FRdecay
3815 result['widths_computed'] = []
3816 cached_widths = sorted(options['cached_widths'].items(), key=lambda el: \
3817 abs(el[0][0]))
3818 for (pdg, lambda_value), width in cached_widths:
3819 if lambda_value != 1.0:
3820 continue
3821 result['widths_computed'].append((model.get_particle(pdg).get_name(),
3822 width))
3823
3824
3825 clean_added_globals(ADDED_GLOBAL)
3826
3827 return result
3828
3833 """Check CMS for the process in argument. The options 'opt' is quite important.
3834 When opt is a list, it means that we are doing NWA and we are filling the
3835 list with the following tuple
3836 ('proc_name',({'ParticlePDG':ParticlePDG,
3837 'FinalStateMothersNumbers':set([]),
3838 'PS_point_used':[]},...))
3839 When opt is a dictionary, we are in CMS mode and its content is reused.
3840 """
3841
3842
3843
3844 NLO = process.get('perturbation_couplings') != []
3845
3846 def glue_momenta(production, decay):
3847 """ Merge together the kinematics for the production of particle
3848 positioned last in the 'production' array with the 1>N 'decay' kinematic'
3849 provided where the decay particle is first."""
3850
3851 from MadSpin.decay import momentum
3852
3853 full = production[:-1]
3854
3855
3856
3857
3858
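# Boost each decay product with the momentum of the resonance (the last entry
# of 'production') so that the production and decay kinematics are expressed
# in the same frame before being merged.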
3859 for p in decay[1:]:
3860 bp = momentum(*p).boost(momentum(*production[-1]))
3861 full.append([bp.E,bp.px,bp.py,bp.pz])
3862
3863 return full
3864
3865 def find_resonances(diagrams):
3866 """ Find all the resonances in the matrix element in argument """
3867
3868 model = process['model']
3869 resonances_found = []
3870
3871 for ll, diag in enumerate(diagrams):
3872 for amp in diag.get('amplitudes'):
3873
3874
3875 s_channels, t_channels = amp.\
3876 get_s_and_t_channels(process.get_ninitial(), model, 0)
3877
3878
3879 replacement_dict = {}
3880 for s_channel in s_channels:
3881 new_resonance = {
3882 'ParticlePDG':s_channel.get('legs')[-1].get('id'),
3883 'FSMothersNumbers':[],
3884 'PS_point_used':[]}
3885 for leg in s_channel.get('legs')[:-1]:
3886 if leg.get('number')>0:
3887 new_resonance['FSMothersNumbers'].append(
3888 leg.get('number'))
3889 else:
3890 try:
3891 new_resonance['FSMothersNumbers'].extend(
3892 replacement_dict[leg.get('number')])
3893 except KeyError:
3894 raise Exception, 'The following diagram '+\
3895 'is malformed:'+diag.nice_string()
3896
3897 replacement_dict[s_channel.get('legs')[-1].get('number')] = \
3898 new_resonance['FSMothersNumbers']
3899 new_resonance['FSMothersNumbers'] = set(
3900 new_resonance['FSMothersNumbers'])
3901 if new_resonance not in resonances_found:
3902 resonances_found.append(new_resonance)
3903
3904
3905 kept_resonances = []
3906 for resonance in resonances_found:
3907
3908 if resonance['ParticlePDG'] == 0:
3909 continue
3910
3911
3912 if abs(resonance['ParticlePDG']) in \
3913 [abs(l.get('id')) for l in process.get('legs')]:
3914 continue
3915
3916 mass_string = evaluator.full_model.get_particle(
3917 resonance['ParticlePDG']).get('mass')
3918 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
3919
3920 if mass==0.0:
3921 continue
3922
3923 width_string = evaluator.full_model.get_particle(
3924 resonance['ParticlePDG']).get('width')
3925 width = evaluator.full_model.get('parameter_dict')[width_string].real
3926
3927
3928 if width==0.0:
3929 continue
3930
3931 final_state_energy = sum(
3932 evaluator.full_model.get('parameter_dict')[
3933 evaluator.full_model.get_particle(l.get('id')).get('mass')].real
3934 for l in process.get('legs') if l.get('number') in
3935 resonance['FSMothersNumbers'])
3936
3937
3938 special_mass = (1.0 + options['offshellness'])*mass
3939
3940
3941 if special_mass<final_state_energy:
3942 raise InvalidCmd('The offshellness specified (%s) is such'\
3943 %options['offshellness']+' that the resulting kinematics are '+\
3944 'impossible for resonance %s %s.'%(evaluator.full_model.
3945 get_particle(resonance['ParticlePDG']).get_name(),
3946 str(list(resonance['FSMothersNumbers']))))
3947 continue
3948
3949
3950 kept_resonances.append(resonance)
3951
3952 for resonance in kept_resonances:
3953
3954 set_PSpoint(resonance, force_other_res_offshell=kept_resonances)
3955
3956
3957
3958 return tuple(kept_resonances)
3959
3960 def set_PSpoint(resonance, force_other_res_offshell=[],
3961 allow_energy_increase=1.5, isolation_cuts=True):
3962 """ Starting from the specified resonance, construct a phase space point
3963 for it and possibly also enforce other resonances to be onshell. Possibly
3964 allow to progressively increase enregy by steps of the integer specified
3965 (negative float to forbid it) and possible enforce default isolation cuts
3966 as well."""
3967
3968 def invmass(momenta):
3969 """ Computes the invariant mass of a list of momenta."""
3970 ptot = [sum(p[i] for p in momenta) for i in range(4)]
3971 return math.sqrt(ptot[0]**2-ptot[1]**2-ptot[2]**2-ptot[3]**2)
3972
3973 model = evaluator.full_model
3974 def getmass(pdg):
3975 """ Returns the mass of a particle given the current model and its
3976 pdg given in argument."""
3977 return model.get('parameter_dict')[
3978 model.get_particle(pdg).get('mass')].real
3979
3980 N_trials = 0
3981 max_trial = 1e4
3982 nstep_for_energy_increase = 1e3
3983 PS_point_found = None
3984 if options['offshellness'] > 0.0:
3985 offshellness = options['offshellness']
3986 else:
3987
3988
3989
3990
3991 offshellness = (0.25*(options['offshellness']+1.0))-1.0
3992
3993
3994
3995
3996 if options['offshellness'] < 0.0:
3997 energy_increase = math.sqrt(allow_energy_increase)
3998 else:
3999 energy_increase = allow_energy_increase
4000
4001 other_res_offshell = [res for res in force_other_res_offshell if
4002 res!=resonance]
4003
4004
4005
4006 all_other_res_masses = [getmass(res['ParticlePDG'])
4007 for res in other_res_offshell]
4008 resonance_mass = getmass(resonance['ParticlePDG'])
4009
4010 str_res = '%s %s'%(model.get_particle(
4011 resonance['ParticlePDG']).get_name(),
4012 str(list(resonance['FSMothersNumbers'])))
4013 leg_number_to_leg = dict((l.get('number'),l) for l in process.get('legs'))
4014
4015
4016
4017 daughter_masses = sum(getmass(leg_number_to_leg[\
4018 number].get('id')) for number in resonance['FSMothersNumbers'])
4019 min_offshellnes = 4.0*((daughter_masses*1.2)/resonance_mass)-1.0
4020
4021
4022
4023 min_energy = max(sum(getmass(l.get('id')) for l in \
4024 process.get('legs') if l.get('state')==True),
4025 sum(getmass(l.get('id')) for l in \
4026 process.get('legs') if l.get('state')==False))
4027
4028
4029
4030 daughter_offshellnesses = [(1.0+options['offshellness'])*mass
4031 for i, mass in enumerate(all_other_res_masses) if
4032 other_res_offshell[i]['FSMothersNumbers'].issubset(
4033 resonance['FSMothersNumbers'])]
4034
4035 if options['offshellness'] >= 0.0:
4036
4037 if len(daughter_offshellnesses)>0:
4038 max_mass = max(daughter_offshellnesses)
4039
4040 offshellness = max(2.0*(max_mass/resonance_mass)-1.0,
4041 options['offshellness'])
4042
4043 max_mass = max([(1.0+options['offshellness'])*mass for mass in \
4044 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4045
4046
4047
4048 target = max(min_energy*1.2,max_mass*2.0)
4049 if target > options['energy']:
4050 logger.warning("The user-defined energy %f seems "%options['energy']+
4051 " insufficient to reach the minimum propagator invariant mass "+
4052 "%f required for the chosen offshellness %f."%(max_mass,
4053 options['offshellness']) + " Energy reset to %f."%target)
4054 options['energy'] = target
4055
4056 else:
4057 if len(daughter_offshellnesses) > 0:
4058 min_mass = min(daughter_offshellnesses)
4059
4060 offshellness = min(0.25*(min_mass/resonance_mass)-1.0,
4061 options['offshellness'])
4062
4063
4064
4065 if (1.0+offshellness)*resonance_mass < daughter_masses*1.2:
4066 msg = 'The resonance %s cannot accommodate'%str_res+\
4067 ' an offshellness of %f because the sum of the'%options['offshellness']+\
4068 ' daughter masses is %f.'%daughter_masses
4069 if options['offshellness']<min_offshellnes:
4070 msg += ' Try again with an offshellness'+\
4071 ' smaller in absolute value (at least %f).'%min_offshellnes
4072 else:
4073 msg += ' Try again with a smaller offshellness (in absolute value).'
4074 raise InvalidCmd(msg)
4075
4076 min_mass = min([(1.0+options['offshellness'])*mass for mass in \
4077 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4078
4079
4080 if 2.0*min_mass < options['energy']:
4081 new_energy = max(min_energy*1.2, 2.0*min_mass)
4082 logger.warning("The user-defined energy %f seems "%options['energy']+
4083 " too large to not overshoot the maximum propagator invariant mass "+
4084 "%f required for the chosen offshellness %f."%(min_mass,
4085 options['offshellness']) + " Energy reset to %f."%new_energy)
4086 options['energy'] = new_energy
4087
4088 if options['offshellness'] < 0.0 and options['energy'] >= min_mass:
4089 logger.debug("The target energy is not compatible with the mass"+
4090 " of the external states for this process (%f). It is "%min_mass+
4091 "unlikely that a valid kinematic configuration will be found.")
4092
4093 if options['offshellness']<0.0 and offshellness<options['offshellness'] or \
4094 options['offshellness']>0.0 and offshellness>options['offshellness']:
4095 logger.debug("Offshellness increased to %f"%offshellness+
4096 " so as to try to find a kinematical configuration with"+
4097 " offshellness at least equal to %f"%options['offshellness']+
4098 " for all resonances.")
4099
4100 start_energy = options['energy']
4101 while N_trials<max_trial:
4102 N_trials += 1
4103 if N_trials%nstep_for_energy_increase==0:
4104 if allow_energy_increase > 0.0:
4105 old_offshellness = offshellness
4106 if offshellness > 0.0:
4107 options['energy'] *= energy_increase
4108 offshellness *= energy_increase
4109 else:
4110 options['energy'] = max(options['energy']/energy_increase,
4111 min_energy*1.2)
4112 offshellness = max(min_offshellnes,
4113 ((offshellness+1.0)/energy_increase)-1.0)
4114 if old_offshellness!=offshellness:
4115 logger.debug('Trying to find a valid kinematic'+\
4116 " configuration for resonance '%s'"%str_res+\
4117 ' with increased offshellness %f'%offshellness)
4118
4119 candidate = get_PSpoint_for_resonance(resonance, offshellness)
4120 pass_offshell_test = True
4121 for i, res in enumerate(other_res_offshell):
4122
4123 if offshellness > 0.0:
4124 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) <\
4125 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4126 pass_offshell_test = False
4127 break
4128 else:
4129 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) >\
4130 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4131 pass_offshell_test = False
4132 break
4133 if not pass_offshell_test:
4134 continue
4135
4136 if isolation_cuts:
4137
4138 if not evaluator.pass_isolation_cuts(candidate,
4139 ptcut=0.05*invmass([candidate[0],candidate[1]]), drcut=0.4):
4140 continue
4141 PS_point_found = candidate
4142 break
4143
4144
4145 options['energy'] = start_energy
4146
4147 if PS_point_found is None:
4148 err_msg = 'Could not find a valid PS point in %d'%max_trial+\
4149 ' trials. Try increasing the energy, modifying the offshellness,'+\
4150 ' or relaxing some constraints.'
4151 if options['offshellness']<0.0:
4152 err_msg +=' Try with a positive offshellness instead (or a '+\
4153 'negative one of smaller absolute value)'
4154 raise InvalidCmd, err_msg
4155 else:
4156
4157
4158 resonance['offshellnesses'] = []
4159 all_other_res_masses = [resonance_mass] + all_other_res_masses
4160 other_res_offshell = [resonance] + other_res_offshell
4161 for i, res in enumerate(other_res_offshell):
4162 if i==0:
4163 res_str = 'self'
4164 else:
4165 res_str = '%s %s'%(model.get_particle(
4166 res['ParticlePDG']).get_name(),
4167 str(list(res['FSMothersNumbers'])))
4168 resonance['offshellnesses'].append((res_str,(
4169 (invmass([PS_point_found[j-1] for j in
4170 res['FSMothersNumbers']])/all_other_res_masses[i])-1.0)))
4171
4172 resonance['PS_point_used'] = PS_point_found
4173
4174 def get_PSpoint_for_resonance(resonance, offshellness = options['offshellness']):
4175 """ Assigns a kinematic configuration to the resonance dictionary
4176 given in argument."""
4177
4178
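# Strategy: split the process into a 'production' process, where the decay
# products of the resonance are replaced by a single fake leg carrying the
# off-shell mass (1+offshellness)*m, and a 1>N 'decay' process for the
# resonance itself. Kinematics are generated for both with get_momenta, glued
# together, and finally reordered according to the original leg numbers.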
4179 mass_string = evaluator.full_model.get_particle(
4180 resonance['ParticlePDG']).get('mass')
4181 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
4182
4183
4184 special_mass = (1.0 + offshellness)*mass
4185
4186
4187 prod_proc = base_objects.Process({'legs':base_objects.LegList(
4188 copy.copy(leg) for leg in process.get('legs') if
4189 leg.get('number') not in resonance['FSMothersNumbers'])})
4190
4191
4192
4193 prod_proc.get('legs').append(base_objects.Leg({
4194 'number':max(l.get('number') for l in process.get('legs'))+1,
4195 'state':True,
4196 'id':0}))
4197
4198 decay_proc = base_objects.Process({'legs':base_objects.LegList(
4199 copy.copy(leg) for leg in process.get('legs') if leg.get('number')
4200 in resonance['FSMothersNumbers'] and not leg.get('state')==False)})
4201
4202
4203
4204
4205 decay_proc.get('legs').insert(0,base_objects.Leg({
4206 'number':-1,
4207 'state':False,
4208 'id':0}))
4209 prod_kinematic = evaluator.get_momenta(prod_proc, options=options,
4210 special_mass=special_mass)[0]
4211 decay_kinematic = evaluator.get_momenta(decay_proc, options=options,
4212 special_mass=special_mass)[0]
4213 momenta = glue_momenta(prod_kinematic,decay_kinematic)
4214
4215
4216
4217 ordered_momenta = [(prod_proc.get('legs')[i].get('number'),momenta[i])
4218 for i in range(len(prod_proc.get('legs'))-1)]
4219
4220 ordered_momenta += [(decay_proc.get('legs')[-i].get('number'),
4221 momenta[-i]) for i in range(1,len(decay_proc.get('legs')))]
4222
4223
4224 return [m[1] for m in sorted(ordered_momenta, key = lambda el: el[0])]
4225
4226
4227
4228 @misc.mute_logger()
4229 def get_width(PDG, lambdaCMS, param_card):
4230 """ Returns the width to use for particle with absolute PDG 'PDG' and
4231 for the lambdaCMS value 'lambdaCMS', using the cache if possible."""
4232
4233
4234
4235 if abs(PDG) in [abs(leg.get('id')) for leg in process.get('legs')]:
4236 return 0.0
4237
4238 particle = evaluator.full_model.get_particle(PDG)
4239
4240
4241
4242 if particle.get('ghost') or particle.get('goldstone'):
4243 return 0.0
4244
4245
4246 if particle.get('width')=='ZERO':
4247 return 0.0
4248
4249 if (PDG,lambdaCMS) in options['cached_widths']:
4250 return options['cached_widths'][(PDG,lambdaCMS)]
4251
4252 if options['recompute_width'] == 'never':
4253 width = evaluator.full_model.\
4254 get('parameter_dict')[particle.get('width')].real
4255 else:
4256
4257 if aloha.complex_mass:
4258 raise MadGraph5Error, "The width for particle with PDG %d and"%PDG+\
4259 " lambdaCMS=%f should have already been "%lambdaCMS+\
4260 "computed during the NWA run."
4261
4262
4263 if options['recompute_width'] in ['always','first_time']:
4264 particle_name = particle.get_name()
4265 with misc.TMP_directory(dir=options['output_path']) as path:
4266 param_card.write(pjoin(path,'tmp.dat'))
4267
4268
4269
4270 command = '%s --output=%s'%(particle_name,pjoin(path,'tmp.dat'))+\
4271 ' --path=%s --body_decay=2'%pjoin(path,'tmp.dat')+\
4272 ' --precision_channel=0.001'
4273
4274 param_card.write(pjoin(options['output_path'],'tmp.dat'))
4275
4276
4277
4278 orig_model = options['cmd']._curr_model
4279 orig_fortran_model = options['cmd']._curr_fortran_model
4280 options['cmd'].do_compute_widths(command, evaluator.full_model)
4281
4282 options['cmd']._curr_model = orig_model
4283 options['cmd']._curr_fortran_model = orig_fortran_model
4284
4285
4286 evaluator.full_model.set_parameters_and_couplings(
4287 param_card=param_card)
4288 try:
4289 tmp_param_card = check_param_card.ParamCard(pjoin(path,'tmp.dat'))
4290 except:
4291 raise MadGraph5Error, 'Error occurred during width '+\
4292 'computation with command:\n compute_widths %s'%command
4293 width = tmp_param_card['decay'].get(PDG).value
4294
4295
4296
4297
4298
4299
4300
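# Cache the computed width. If it is computed only once (modes 'never' or
# 'first_time'), it is rescaled linearly in lambdaCMS for all other values of
# the expansion parameter, which assumes that the LO width scales linearly
# with lambda.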
4301 if options['recompute_width'] in ['never','first_time']:
4302
4303 for lam in options['lambdaCMS']:
4304 options['cached_widths'][(PDG,lam)]=width*(lam/lambdaCMS)
4305 else:
4306 options['cached_widths'][(PDG,lambdaCMS)] = width
4307
4308 return options['cached_widths'][(PDG,lambdaCMS)]
4309
4310 def get_order(diagrams, diagsName):
4311 """Compute the common summed of coupling orders used for this cms check
4312 in the diagrams specified. When inconsistency occurs, use orderName
4313 in the warning message if throwm."""
4314
4315 orders = set([])
4316 for diag in diagrams:
4317 diag_orders = diag.calculate_orders()
4318 orders.add(sum((diag_orders[order] if order in diag_orders else 0)
4319 for order in options['expansion_orders']))
4320 if len(orders)>1:
4321 logger.warning(warning_msg%('%s '%diagsName,str(orders)))
4322 return min(list(orders))
4323 else:
4324 return list(orders)[0]
4325
4326 MLoptions = copy.copy(options['MLOptions'])
4327
4328 MLoptions['DoubleCheckHelicityFilter'] = False
4329
4330
4331 for tweak in options['tweak']['custom']:
4332 if tweak.startswith('seed'):
4333 try:
4334 new_seed = int(tweak[4:])
4335 except ValueError:
4336 raise MadGraph5Error, "Seed '%s' is not of the right format 'seed<int>'."%tweak
4337 random.seed(new_seed)
4338
4339 mode = 'CMS' if aloha.complex_mass else 'NWA'
4340 for i, leg in enumerate(process.get('legs')):
4341 leg.set('number', i+1)
4342
4343 logger.info("Running CMS check for process %s (now doing %s scheme)" % \
4344 ( process.nice_string().replace('Process:', 'process'), mode))
4345
4346 proc_dir = None
4347 resonances = None
4348 warning_msg = "All %sdiagrams do not share the same sum of orders "+\
4349 "%s; found %%s."%(','.join(options['expansion_orders']))+\
4350 " This potentially problematic for the CMS check."
4351 if NLO:
4352
4353
4354
4355 if options['name']=='auto':
4356 proc_name = "%s%s_%s%s__%s__"%(('SAVED' if options['reuse'] else ''),
4357 temp_dir_prefix, '_'.join(process.shell_string().split('_')[1:]),
4358 ('_' if process.get('perturbation_couplings') else '')+
4359 '_'.join(process.get('perturbation_couplings')),mode)
4360 else:
4361 proc_name = "%s%s_%s__%s__"%(('SAVED' if options['reuse'] else ''),
4362 temp_dir_prefix,options['name'], mode)
4363
4364 timing, matrix_element = generate_loop_matrix_element(process,
4365 options['reuse'], output_path=options['output_path'],
4366 cmd = options['cmd'], proc_name=proc_name,
4367 loop_filter=options['loop_filter'])
4368 if matrix_element is None:
4369
4370 return None
4371
4372 reusing = isinstance(matrix_element, base_objects.Process)
4373 proc_dir = pjoin(options['output_path'],proc_name)
4374
4375
4376 infos = evaluator.setup_process(matrix_element, proc_dir,
4377 reusing = reusing, param_card = options['param_card'],
4378 MLOptions=MLoptions)
4379
4380 evaluator.fix_MadLoopParamCard(pjoin(proc_dir,'Cards'),
4381 mp = None, loop_filter = True,MLOptions=MLoptions)
4382
4383
4384 tmp_card_backup = pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__')
4385 if os.path.isfile(tmp_card_backup):
4386
4387 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4388 " Now reverting 'param_card.dat' to its original value.")
4389 shutil.copy(tmp_card_backup, pjoin(proc_dir, 'Cards','param_card.dat'))
4390 else:
4391
4392 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'), tmp_card_backup)
4393
4394 tmp_modelfunc_backup = pjoin(proc_dir,'Source','MODEL',
4395 'model_functions.f__TemporaryBackup__')
4396 if os.path.isfile(tmp_modelfunc_backup):
4397
4398 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4399 " Now reverting 'model_functions.f' to its original value.")
4400 shutil.copy(tmp_modelfunc_backup, pjoin(proc_dir,'Source','MODEL',
4401 'model_functions.f'))
4402 evaluator.apply_log_tweak(proc_dir, 'recompile')
4403 else:
4404
4405 shutil.copy(pjoin(proc_dir,'Source','MODEL','model_functions.f'),
4406 tmp_modelfunc_backup)
4407
4408
4409 MadLoopInitializer.fix_PSPoint_in_check(pjoin(proc_dir,'SubProcesses'),
4410 read_ps = True, npoints = 1, hel_config = options['helicity'],
4411 split_orders=options['split_orders'])
4412
4413
4414
4415 for dir in misc.glob('P*_*', pjoin(proc_dir,'SubProcesses')):
4416 if not re.search(r'.*P\d+_\w*$', dir) or not os.path.isdir(dir):
4417 continue
4418 try:
4419 os.remove(pjoin(dir,'check'))
4420 os.remove(pjoin(dir,'check_sa.o'))
4421 except OSError:
4422 pass
4423
4424 with open(os.devnull, 'w') as devnull:
4425 retcode = subprocess.call(['make','check'],
4426 cwd=dir, stdout=devnull, stderr=devnull)
4427 if retcode != 0:
4428 raise MadGraph5Error, "Compilation error with "+\
4429 "'make check' in %s"%dir
4430
4431
4432 pkl_path = pjoin(proc_dir,'resonance_specs.pkl')
4433 if reusing:
4434
4435
4436 if not os.path.isfile(pkl_path):
4437 raise InvalidCmd('The folder %s could'%proc_dir+\
4438 " not be reused because the resonance specification file "+
4439 "'resonance_specs.pkl' is missing.")
4440 else:
4441 proc_name, born_order, loop_order, resonances = \
4442 save_load_object.load_from_file(pkl_path)
4443
4444
4445 for res in resonances:
4446 set_PSpoint(res, force_other_res_offshell=resonances)
4447
4448
4449 if isinstance(opt, list):
4450 opt.append((proc_name, resonances))
4451 else:
4452 resonances = opt
4453 else:
4454 helas_born_diagrams = matrix_element.get_born_diagrams()
4455 if len(helas_born_diagrams)==0:
4456 logger.warning('The CMS check for loop-induced processes is '+\
4457 'not yet available (nor is it very interesting).')
4458 return None
4459 born_order = get_order(helas_born_diagrams,'Born')
4460 loop_order = get_order(matrix_element.get_loop_diagrams(),'loop')
4461
4462
4463 if isinstance(opt, list):
4464 opt.append((process.base_string(),find_resonances(helas_born_diagrams)))
4465 resonances = opt[-1][1]
4466 else:
4467 resonances = opt
4468
4469
4470 save_load_object.save_to_file(pkl_path, (process.base_string(),
4471 born_order, loop_order,resonances))
4472
4473 else:
4474
4475 try:
4476 amplitude = diagram_generation.Amplitude(process)
4477 except InvalidCmd:
4478 logging.info("No diagrams for %s" % \
4479 process.nice_string().replace('Process', 'process'))
4480 return None
4481 if not amplitude.get('diagrams'):
4482
4483 logging.info("No diagrams for %s" % \
4484 process.nice_string().replace('Process', 'process'))
4485 return None
4486
4487 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4488 gen_color=True)
4489 diagrams = matrix_element.get('diagrams')
4490 born_order = get_order(diagrams,'Born')
4491
4492 loop_order = -1
4493
4494 if isinstance(opt, list):
4495 opt.append((process.base_string(),find_resonances(diagrams)))
4496 resonances = opt[-1][1]
4497 else:
4498 resonances= opt
4499
4500 if len(resonances)==0:
4501 logger.info("No resonance found for process %s."\
4502 %process.base_string())
4503 return None
4504
4505
4506 if not options['cached_param_card'][mode][0]:
4507 if NLO:
4508 param_card = check_param_card.ParamCard(
4509 pjoin(proc_dir,'Cards','param_card.dat'))
4510 else:
4511 param_card = check_param_card.ParamCard(
4512 StringIO.StringIO(evaluator.full_model.write_param_card()))
4513 options['cached_param_card'][mode][0] = param_card
4514 name2block, _ = param_card.analyze_param_card()
4515 options['cached_param_card'][mode][1] = name2block
4516
4517 else:
4518 param_card = options['cached_param_card'][mode][0]
4519 name2block = options['cached_param_card'][mode][1]
4520
4521
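# Consistency check: the sum of the Born and loop coupling orders must be even
# so that the 'loop_order' stored below, defined as their average, is an
# integer.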
4522 if loop_order != -1 and (loop_order+born_order)%2 != 0:
4523 raise MadGraph5Error, 'The sum of the Born and loop coupling'+\
4524 " orders '%d' is not even."%(loop_order+born_order)
4525 result = {'born_order':born_order,
4526 'loop_order': (-1 if loop_order==-1 else (loop_order+born_order)//2),
4527 'resonances_result':[]}
4528
4529
4530 if NLO:
4531 try:
4532 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'),
4533 pjoin(proc_dir,'Cards','param_card.dat__backUp__'))
4534 except:
4535 pass
4536
4537
4538 had_log_tweaks=False
4539 if NLO:
4540 for tweak in options['tweak']['custom']:
4541 if tweak.startswith('seed'):
4542 continue
4543 try:
4544 logstart, logend = tweak.split('->')
4545 except:
4546 raise MadGraph5Error, "Tweak '%s' not recognized."%tweak
4547 if logstart in ['logp','logm', 'log'] and \
4548 logend in ['logp','logm', 'log']:
4549 if NLO:
4550 evaluator.apply_log_tweak(proc_dir, [logstart, logend])
4551 had_log_tweaks = True
4552 else:
4553 raise MadGraph5Error, "Tweak '%s' not recognized."%tweak
4554 if had_log_tweaks:
4555 evaluator.apply_log_tweak(proc_dir, 'recompile')
4556
4557
4558 if options['resonances']=='all':
4559 resonances_to_run = resonances
4560 elif isinstance(options['resonances'],int):
4561 resonances_to_run = resonances[:options['resonances']]
4562 elif isinstance(options['resonances'],list):
4563 resonances_to_run = []
4564 for res in resonances:
4565 for res_selection in options['resonances']:
4566 if abs(res['ParticlePDG'])==res_selection[0] and \
4567 res['FSMothersNumbers']==set(res_selection[1]):
4568 resonances_to_run.append(res)
4569 break
4570 else:
4571 raise InvalidCmd("Resonance selection '%s' not recognized"%\
4572 str(options['resonances']))
4573
4574
4575
4576 if NLO and options['show_plot']:
4577 widgets = ['ME evaluations:', pbar.Percentage(), ' ',
4578 pbar.Bar(),' ', pbar.ETA(), ' ']
4579 progress_bar = pbar.ProgressBar(widgets=widgets,
4580 maxval=len(options['lambdaCMS'])*len(resonances_to_run), fd=sys.stdout)
4581 progress_bar.update(0)
4582
4583 sys.stdout.flush()
4584 else:
4585 progress_bar = None
4586
4587 for resNumber, res in enumerate(resonances_to_run):
4588
4589
4590 result['resonances_result'].append({'resonance':res,'born':[]})
4591 if NLO:
4592 result['resonances_result'][-1]['finite'] = []
4593
4594 for lambdaNumber, lambdaCMS in enumerate(options['lambdaCMS']):
4595
4596
4597 new_param_card = check_param_card.ParamCard(param_card)
4598
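# Rescale every expansion parameter according to the user-supplied replacement
# expression, which is evaluated with the original parameter value and the
# current lambdaCMS value made available as 'lambdacms'.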
4599 for param, replacement in options['expansion_parameters'].items():
4600
4601
4602 orig_param = param.replace('__tmpprefix__','')
4603 if orig_param not in name2block:
4604
4605
4606
4607 continue
4608 for block, lhaid in name2block[orig_param]:
4609 orig_value = float(param_card[block].get(lhaid).value)
4610 new_value = eval(replacement,
4611 {param:orig_value,'lambdacms':lambdaCMS})
4612 new_param_card[block].get(lhaid).value=new_value
4613
4614
4615
4616
4617
4618
4619
4620
4621 evaluator.full_model.set_parameters_and_couplings(
4622 param_card=new_param_card)
4623
4624 for decay in new_param_card['decay'].keys():
4625 if mode=='CMS':
4626 new_width = get_width(abs(decay[0]), lambdaCMS,
4627 new_param_card)
4628 else:
4629 new_width = 0.0
4630 new_param_card['decay'].get(decay).value= new_width
4631
4632
4633 evaluator.full_model.set_parameters_and_couplings(
4634 param_card=new_param_card)
4635 if NLO:
4636 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4637
4638
4639 if lambdaCMS==1.0 and mode=='CMS' and \
4640 options['recompute_width'] in ['always','first_time']:
4641 new_param_card.write(pjoin(proc_dir,
4642 'Cards','param_card.dat_recomputed_widths'))
4643
4644
4645
4646 if mode=='NWA' and (options['recompute_width']=='always' or (
4647 options['recompute_width']=='first_time' and lambdaCMS==1.0)):
4648
4649 tmp_param_card = check_param_card.ParamCard(new_param_card)
4650
4651
4652 for decay in new_param_card['decay'].keys():
4653 particle_name = evaluator.full_model.get_particle(\
4654 abs(decay[0])).get_name()
4655 new_width = get_width(abs(decay[0]),lambdaCMS,new_param_card)
4656 tmp_param_card['decay'].get(decay).value = new_width
4657 if not options['has_FRdecay'] and new_width != 0.0 and \
4658 (abs(decay[0]),lambdaCMS) not in options['cached_widths']:
4659 logger.info('Numerically computed width of particle'+\
4660 ' %s for lambda=%.4g : %-9.6gGeV'%
4661 (particle_name,lambdaCMS,new_width))
4662
4663
4664
4665 if lambdaCMS==1.0 and NLO:
4666 tmp_param_card.write(pjoin(proc_dir,
4667 'Cards','param_card.dat_recomputed_widths'))
4668
4669
4670 for param, replacement in options['tweak']['params'].items():
4671
4672
4673 orig_param = param.replace('__tmpprefix__','')
4674
4675 if orig_param.lower() == 'allwidths':
4676
4677 for decay in new_param_card['decay'].keys():
4678 orig_value = float(new_param_card['decay'].get(decay).value)
4679 new_value = eval(replacement,
4680 {param:orig_value,'lambdacms':lambdaCMS})
4681 new_param_card['decay'].get(decay).value = new_value
4682 continue
4683 if orig_param not in name2block:
4684
4685
4686 continue
4687 for block, lhaid in name2block[orig_param]:
4688 orig_value = float(new_param_card[block].get(lhaid).value)
4689 new_value = eval(replacement,
4690 {param:orig_value,'lambdacms':lambdaCMS})
4691 new_param_card[block].get(lhaid).value=new_value
4692
4693 if options['tweak']['params']:
4694
4695 evaluator.full_model.set_parameters_and_couplings(
4696 param_card=new_param_card)
4697 if NLO:
4698 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4699
4700
4701 if NLO:
4702 ME_res = LoopMatrixElementEvaluator.get_me_value(process, 0,
4703 proc_dir, PSpoint=res['PS_point_used'], verbose=False,
4704 format='dict', skip_compilation=True)
4705
4706
4707
4708
4709 result['resonances_result'][-1]['born'].append(ME_res['born'])
4710 result['resonances_result'][-1]['finite'].append(
4711 ME_res['finite']*ME_res['born']*ME_res['alphaS_over_2pi'])
4712 else:
4713 ME_res = evaluator.evaluate_matrix_element(matrix_element,
4714 p=res['PS_point_used'], auth_skipping=False, output='m2')[0]
4715 result['resonances_result'][-1]['born'].append(ME_res)
4716 if not progress_bar is None:
4717 progress_bar.update(resNumber*len(options['lambdaCMS'])+\
4718 (lambdaNumber+1))
4719
4720 sys.stdout.flush()
4721
4722
4723 log_reversed = False
4724 for tweak in options['tweak']['custom']:
4725 if tweak.startswith('log') and had_log_tweaks:
4726 if log_reversed:
4727 continue
4728 if NLO:
4729 evaluator.apply_log_tweak(proc_dir, 'default')
4730 evaluator.apply_log_tweak(proc_dir, 'recompile')
4731 log_reversed = True
4732
4733
4734 evaluator.full_model.set_parameters_and_couplings(param_card=param_card)
4735 if NLO:
4736 try:
4737 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat__backUp__'),
4738 pjoin(proc_dir,'Cards','param_card.dat'))
4739 except:
4740 param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4741
4742
4743
4744 try:
4745 os.remove(pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__'))
4746 os.remove(pjoin(proc_dir,'Source','MODEL',
4747 'model_functions.f__TemporaryBackup__'))
4748 except:
4749 pass
4750
4751 return (process.nice_string().replace('Process:', '').strip(),result)
4752
4753 -def get_value(process, evaluator, p=None, options=None):
4754 """Return the value/momentum for a phase space point"""
4755
4756 for i, leg in enumerate(process.get('legs')):
4757 leg.set('number', i+1)
4758
4759 logger.info("Checking %s in %s gauge" % \
4760 ( process.nice_string().replace('Process:', 'process'),
4761 'unitary' if aloha.unitary_gauge else 'feynman'))
4762
4763 legs = process.get('legs')
4764
4765
4766 try:
4767 if process.get('perturbation_couplings')==[]:
4768 amplitude = diagram_generation.Amplitude(process)
4769 else:
4770 amplitude = loop_diagram_generation.LoopAmplitude(process)
4771 except InvalidCmd:
4772 logging.info("No diagrams for %s" % \
4773 process.nice_string().replace('Process', 'process'))
4774 return None
4775
4776 if not amplitude.get('diagrams'):
4777
4778 logging.info("No diagrams for %s" % \
4779 process.nice_string().replace('Process', 'process'))
4780 return None
4781
4782 if not p:
4783
4784 p, w_rambo = evaluator.get_momenta(process, options)
4785
4786
4787 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
4788 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4789 gen_color = True)
4790 else:
4791 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
4792 gen_color = True, optimized_output = evaluator.loop_optimized_output)
4793
4794 mvalue = evaluator.evaluate_matrix_element(matrix_element, p=p,
4795 output='jamp',options=options)
4796
4797 if mvalue and mvalue['m2']:
4798 return {'process':process.base_string(),'value':mvalue,'p':p}
4799
4801 """Present the results of a comparison in a nice list format for loop
4802 processes. It details the results from each Lorentz transformation performed.
4803 """
4804
4805 process = comparison_results[0]['process']
4806 results = comparison_results[0]['results']
4807
4808
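# A tighter tolerance is used for rotations than for boosts, presumably because
# boosted kinematics are more prone to numerical cancellations in the loop
# evaluation.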
4809 threshold_rotations = 1e-6
4810
4811
4812
4813 threshold_boosts = 1e-3
4814 res_str = "%s" % process.base_string()
4815
4816 transfo_col_size = 17
4817 col_size = 18
4818 transfo_name_header = 'Transformation name'
4819
4820 if len(transfo_name_header) + 1 > transfo_col_size:
4821 transfo_col_size = len(transfo_name_header) + 1
4822
4823 misc.sprint(results)
4824 for transfo_name, value in results:
4825 if len(transfo_name) + 1 > transfo_col_size:
4826 transfo_col_size = len(transfo_name) + 1
4827
4828 res_str += '\n' + fixed_string_length(transfo_name_header, transfo_col_size) + \
4829 fixed_string_length("Value", col_size) + \
4830 fixed_string_length("Relative diff.", col_size) + "Result"
4831
4832 ref_value = results[0]
4833 res_str += '\n' + fixed_string_length(ref_value[0], transfo_col_size) + \
4834 fixed_string_length("%1.10e" % ref_value[1]['m2'], col_size)
4835
4836
4837 all_pass = True
4838 for res in results[1:]:
4839 threshold = threshold_boosts if 'BOOST' in res[0].upper() else \
4840 threshold_rotations
4841 rel_diff = abs((ref_value[1]['m2']-res[1]['m2'])\
4842 /((ref_value[1]['m2']+res[1]['m2'])/2.0))
4843 this_pass = rel_diff <= threshold
4844 if not this_pass:
4845 all_pass = False
4846 res_str += '\n' + fixed_string_length(res[0], transfo_col_size) + \
4847 fixed_string_length("%1.10e" % res[1]['m2'], col_size) + \
4848 fixed_string_length("%1.10e" % rel_diff, col_size) + \
4849 ("Passed" if this_pass else "Failed")
4850 if all_pass:
4851 res_str += '\n' + 'Summary: passed'
4852 else:
4853 res_str += '\n' + 'Summary: failed'
4854
4855 return res_str
4856
4858 """Present the results of a comparison in a nice list format
4859 if output='fail', return the number of failed processes -- for tests --
4860 """
4861
4862
4863 if comparison_results[0]['process']['perturbation_couplings']!=[]:
4864 return output_lorentz_inv_loop(comparison_results, output)
4865
4866 proc_col_size = 17
4867
4868 threshold=1e-10
4869 process_header = "Process"
4870
4871 if len(process_header) + 1 > proc_col_size:
4872 proc_col_size = len(process_header) + 1
4873
4874 for proc, values in comparison_results:
4875 if len(proc) + 1 > proc_col_size:
4876 proc_col_size = len(proc) + 1
4877
4878 col_size = 18
4879
4880 pass_proc = 0
4881 fail_proc = 0
4882 no_check_proc = 0
4883
4884 failed_proc_list = []
4885 no_check_proc_list = []
4886
4887 res_str = fixed_string_length(process_header, proc_col_size) + \
4888 fixed_string_length("Min element", col_size) + \
4889 fixed_string_length("Max element", col_size) + \
4890 fixed_string_length("Relative diff.", col_size) + \
4891 "Result"
4892
4893 for one_comp in comparison_results:
4894 proc = one_comp['process'].base_string()
4895 data = one_comp['results']
4896
4897 if data == 'pass':
4898 no_check_proc += 1
4899 no_check_proc_list.append(proc)
4900 continue
4901
4902 values = [data[i]['m2'] for i in range(len(data))]
4903
4904 min_val = min(values)
4905 max_val = max(values)
4906 diff = (max_val - min_val) / abs(max_val)
4907
4908 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
4909 fixed_string_length("%1.10e" % min_val, col_size) + \
4910 fixed_string_length("%1.10e" % max_val, col_size) + \
4911 fixed_string_length("%1.10e" % diff, col_size)
4912
4913 if diff < threshold:
4914 pass_proc += 1
4915 proc_succeed = True
4916 res_str += "Passed"
4917 else:
4918 fail_proc += 1
4919 proc_succeed = False
4920 failed_proc_list.append(proc)
4921 res_str += "Failed"
4922
4923
4924
4925
4926
4927 if len(data[0]['jamp'])!=0:
4928 for k in range(len(data[0]['jamp'][0])):
4929 sum = [0] * len(data)
4930
4931 for j in range(len(data[0]['jamp'])):
4932
4933 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
4934 sum = [sum[i] + values[i] for i in range(len(values))]
4935
4936
4937 min_val = min(sum)
4938 max_val = max(sum)
4939 if not max_val:
4940 continue
4941 diff = (max_val - min_val) / max_val
4942
4943 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
4944 fixed_string_length("%1.10e" % min_val, col_size) + \
4945 fixed_string_length("%1.10e" % max_val, col_size) + \
4946 fixed_string_length("%1.10e" % diff, col_size)
4947
4948 if diff > 1e-10:
4949 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
4950 fail_proc += 1
4951 pass_proc -= 1
4952 failed_proc_list.append(proc)
4953 res_str += tmp_str + "Failed"
4954 elif not proc_succeed:
4955 res_str += tmp_str + "Passed"
4956
4957
4958
4959 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
4960 (pass_proc, pass_proc + fail_proc,
4961 fail_proc, pass_proc + fail_proc)
4962
4963 if fail_proc != 0:
4964 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
4965 if no_check_proc:
4966 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
4967
4968 if output == 'text':
4969 return res_str
4970 else:
4971 return fail_proc
4972
4974 """Present the results of a comparison in a nice list format
4975 if output='fail', return the number of failed processes -- for tests --
4976 """
4977
4978 proc_col_size = 17
4979
4980
4981
4982 pert_coupl = comparison_results[0]['perturbation_couplings']
4983 comparison_results = comparison_results[1:]
4984
4985 if pert_coupl:
4986 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
4987 else:
4988 process_header = "Process"
4989
4990 if len(process_header) + 1 > proc_col_size:
4991 proc_col_size = len(process_header) + 1
4992
4993 for data in comparison_results:
4994 proc = data['process']
4995 if len(proc) + 1 > proc_col_size:
4996 proc_col_size = len(proc) + 1
4997
4998 pass_proc = 0
4999 fail_proc = 0
5000 no_check_proc = 0
5001
5002 failed_proc_list = []
5003 no_check_proc_list = []
5004
5005 col_size = 18
5006
5007 res_str = fixed_string_length(process_header, proc_col_size) + \
5008 fixed_string_length("Unitary", col_size) + \
5009 fixed_string_length("Feynman", col_size) + \
5010 fixed_string_length("Relative diff.", col_size) + \
5011 "Result"
5012
5013 for one_comp in comparison_results:
5014 proc = one_comp['process']
5015 data = [one_comp['value_unit'], one_comp['value_feynm']]
5016
5017
5018 if data[0] == 'pass':
5019 no_check_proc += 1
5020 no_check_proc_list.append(proc)
5021 continue
5022
5023 values = [data[i]['m2'] for i in range(len(data))]
5024
5025 min_val = min(values)
5026 max_val = max(values)
5027
5028
5029 diff = (max_val - min_val) / abs(max_val)
5030
5031 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
5032 fixed_string_length("%1.10e" % values[0], col_size) + \
5033 fixed_string_length("%1.10e" % values[1], col_size) + \
5034 fixed_string_length("%1.10e" % diff, col_size)
5035
5036 if diff < 1e-8:
5037 pass_proc += 1
5038 proc_succeed = True
5039 res_str += "Passed"
5040 else:
5041 fail_proc += 1
5042 proc_succeed = False
5043 failed_proc_list.append(proc)
5044 res_str += "Failed"
5045
5046
5047
5048
5049
5050 if len(data[0]['jamp'])>0:
5051 for k in range(len(data[0]['jamp'][0])):
5052 sum = [0, 0]
5053
5054 for j in range(len(data[0]['jamp'])):
5055
5056 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
5057 sum = [sum[i] + values[i] for i in range(len(values))]
5058
5059
5060 min_val = min(sum)
5061 max_val = max(sum)
5062 if not max_val:
5063 continue
5064 diff = (max_val - min_val) / max_val
5065
5066 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , col_size) + \
5067 fixed_string_length("%1.10e" % sum[0], col_size) + \
5068 fixed_string_length("%1.10e" % sum[1], col_size) + \
5069 fixed_string_length("%1.10e" % diff, col_size)
5070
5071 if diff > 1e-10:
5072 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
5073 fail_proc += 1
5074 pass_proc -= 1
5075 failed_proc_list.append(proc)
5076 res_str += tmp_str + "Failed"
5077 elif not proc_succeed:
5078 res_str += tmp_str + "Passed"
5079
5080
5081
5082 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
5083 (pass_proc, pass_proc + fail_proc,
5084 fail_proc, pass_proc + fail_proc)
5085
5086 if fail_proc != 0:
5087 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
5088 if no_check_proc:
5089 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
5090
5091
5092 if output == 'text':
5093 return res_str
5094 else:
5095 return fail_proc
5096
5097 -def CMS_save_path(extension, cms_res, used_model, opts, output_path=None):
5098 """Creates a suitable filename for saving these results."""
5099
5100 if opts['name']=='auto' and opts['analyze']!='None':
5101
5102 return '%s.%s'%(os.path.splitext(opts['analyze'].split(',')[0])\
5103 [0],extension)
5104
5105 if opts['name']!='auto':
5106 basename = opts['name']
5107 else:
5108 prefix = 'cms_check_'
5109
5110 if len(cms_res['ordered_processes'])==1:
5111 proc = cms_res['ordered_processes'][0]
5112 replacements = {' ':'','+':'p','-':'m','~':'x', '>':'_','=':'eq'}
5113
5114 try:
5115 proc=proc[:proc.index('[')]
5116 except ValueError:
5117 pass
5118
5119 for key, value in replacements.items():
5120 proc = proc.replace(key,value)
5121
5122 basename =prefix+proc+'_%s_'%used_model.get('name')+\
5123 ( ('_'+'_'.join(cms_res['perturbation_orders'])) if \
5124 cms_res['perturbation_orders']!=[] else '')
5125
5126 else:
5127 basename = prefix+datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
5128
5129 suffix = '_%s'%opts['tweak']['name'] if opts['tweak']['name']!='' else ''
5130 if output_path:
5131 return pjoin(output_path,'%s%s.%s'%(basename,suffix,extension))
5132 else:
5133 return '%s%s.%s'%(basename,suffix,extension)
5134
5136 """ Outputs nicely the outcome of the complex mass scheme check performed
5137 by varying the width in the offshell region of resonances found for each process.
5138 Output just specifies whether text should be returned or a list of failed
5139 processes. Use 'concise_text' for a concise report of the results."""
5140
5141 pert_orders=result['perturbation_orders']
5142
5143
5144
5145
5146
5147
5148
5149
5150
5151 diff_lambda_power = options['diff_lambda_power']
5152
5153
5154
5155
5156
5157
5158
5159 if 'has_FRdecay' in result:
5160 has_FRdecay = result['has_FRdecay']
5161 else:
5162 has_FRdecay = False
5163
5164 if not pert_orders:
5165 CMS_test_threshold = 1e-3
5166 else:
5167
5168
5169
5170
5171
5172
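# The CMS test threshold is scaled with the smallest lambdaCMS value probed.
# When the widths had to be computed numerically (no FeynRules decay module),
# they are less accurate and a looser threshold is used.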
5173 if not has_FRdecay and ('recompute_width' not in result or \
5174 result['recompute_width'] in ['always','first_time']):
5175 CMS_test_threshold = 2e-2*(1.0e-4/min(result['lambdaCMS']))
5176 else:
5177
5178
5179 CMS_test_threshold = 2e-2*(1.0e-5/min(result['lambdaCMS']))
5180
5181
5182
5183
5184 consideration_threshold = min(CMS_test_threshold/10.0, 0.05)
5185
5186
5187 group_val = 3
5188
5189
5190
5191
5192 diff_zero_threshold = 1e-3
5193
5194
5195 lambda_range = options['lambda_plot_range']
5196
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206 res_str = ''
5207
5208 concise_str = ''
5209 concise_data = '%%(process)-%ds%%(asymptot)-15s%%(cms_check)-25s%%(status)-25s\n'
5210 concise_repl_dict = {'Header':{'process':'Process',
5211 'asymptot':'Asymptote',
5212 'cms_check':'Deviation from asymptote',
5213 'status':'Result'}}
5214
5215
5216
5217
5218
5219 useLatexParticleName = 'built-in'
5220 name2tex = {'e+':r'e^+','w+':r'W^+','a':r'\gamma','g':'g',
5221 'e-':r'e^-','w-':r'W^-','z':'Z','h':'H',
5222 'mu+':r'\mu^+',
5223 'mu-':r'\mu^-',
5224 'ta+':r'\tau^+',
5225 'ta-':r'\tau^-'}
5226 for p in ['e','m','t']:
5227 d = {'e':'e','m':r'\mu','t':r'\tau'}
5228 name2tex['v%s'%p]=r'\nu_{%s}'%d[p]
5229 name2tex['v%s~'%p]=r'\bar{\nu_{%s}}'%d[p]
5230
5231 for p in ['u','d','c','s','b','t']:
5232 name2tex[p]=p
5233 name2tex['%s~'%p]=r'\bar{%s}'%p
5234
5235 def format_particle_name(particle, latex=useLatexParticleName):
5236 p_name = particle
5237 if latex=='model':
5238 try:
5239 texname = model.get_particle(particle).get('texname')
5240 if texname and texname!='none':
5241 p_name = r'$\displaystyle %s$'%texname
5242 except:
5243 pass
5244 elif latex=='built-in':
5245 try:
5246 p_name = r'$\displaystyle %s$'%name2tex[particle]
5247 except:
5248 pass
5249 return p_name
5250
5251 def resonance_str(resonance, latex=useLatexParticleName):
5252 """ Provides a concise string to characterize the resonance """
5253 particle_name = model.get_particle(resonance['ParticlePDG']).get_name()
5254 mothersID=['%d'%n for n in sorted(resonance['FSMothersNumbers'])]
5255 return r"%s [%s]"%(format_particle_name(particle_name,latex=latex),
5256 ','.join(mothersID))
5257
5258 def format_title(process, resonance):
5259 """ Format the plot title given the process and resonance """
5260
5261 process_string = []
5262 for particle in process.split():
5263 if particle=='$$':
5264 process_string.append(r'\$\$')
5265 continue
5266 if particle=='>':
5267 process_string.append(r'$\displaystyle \rightarrow$')
5268 continue
5269 process_string.append(format_particle_name(particle))
5270
5271 if resonance=='':
5272 return r'CMS check for %s' %(' '.join(process_string))
5273 else:
5274 return r'CMS check for %s ( resonance %s )'\
5275 %(' '.join(process_string),resonance)
5276
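# Example of the scaling fit performed below: with lambda values
# [1.0, 0.5, 0.25] and squared-ME values [1.0, 0.25, 0.0625] (a lambda^2
# behaviour), the first pair gives round(math.log(4., 2.)) = 2 and the
# second round(math.log(16., 4.)) = 2, so the guessed power is 2.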
5277 def guess_lambdaorder(ME_values_list, lambda_values, expected=None,
5278 proc=None, res=None):
5279 """ Guess the lambda scaling from a list of ME values and return it.
5280 Also compare with the expected result if specified and trigger a
5281 warning if not in agreement."""
5282
5283 bpowers = []
5284 for i, lambdaCMS in enumerate(lambda_values[1:]):
5285 bpowers.append(round(math.log(ME_values_list[0]/ME_values_list[i+1],\
5286 lambda_values[0]/lambdaCMS)))
5287
5288
5289 bpower = sorted([(el, bpowers.count(el)) for el in set(bpowers)],
5290 key = lambda elem: elem[1], reverse=True)[0][0]
5291 if not expected:
5292 return bpower
5293 if bpower != expected:
5294 logger.warning('The expected scaling of the squared amplitude'+
5295 ' seems inconsistent with the detected value '+
5296 '(%i vs %i). %i will be used.'%(expected,bpower,bpower)+
5297 ' This happened for process %s and resonance %s.'%(proc, res))
5298 return bpower
5299
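# The stability estimator below rescales each value by
# (lambda_values[0]/lambda_values[i])**lambda_scaling, takes the median of
# the rescaled values and compares the largest excursion from that median to
# the median itself; a relative spread of 1% or more triggers a warning.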
5300 def check_stability(ME_values, lambda_values, lambda_scaling, values_name):
5301 """ Checks if the values passed in argument are stable and return the
5302 stability check outcome warning if it is not precise enough. """
5303
5304 values = sorted([
5305 abs(val*(lambda_values[0]/lambda_values[i])**lambda_scaling) for \
5306 i, val in enumerate(ME_values)])
5307 median = values[len(values)//2]
5308 max_diff = max(abs(values[0]-median),abs(values[-1]-median))
5309 stability = max_diff/median
5310 stab_threshold = 1e-2
5311 if stability >= stab_threshold:
5312 return "== WARNING: Stability check failed for '%s' with stability %.2e.\n"\
5313 %(values_name, stability)
5314 else:
5315 return None
5316
5317 if options['analyze']=='None':
5318 if options['reuse']:
5319 save_path = CMS_save_path('pkl', result, model, options,
5320 output_path=output_path)
5321 buff = "\nThe results of this check have been stored on disk and their "+\
5322 "analysis can be rerun at any time with the MG5aMC command:\n "+\
5323 " check cms --analyze=%s\n"%save_path
5324 res_str += buff
5325 concise_str += buff
5326 save_load_object.save_to_file(save_path, result)
5327 elif len(result['ordered_processes'])>0:
5328 buff = "\nUse the following syntax if you want to store "+\
5329 "the raw results on disk.\n"+\
5330 " check cms -reuse <proc_def> <options>\n"
5331 res_str += buff
5332 concise_str += buff
5333
5334
5335
5336
5337
5338 checks = []
5339 for process in result['ordered_processes']:
5340 checks.extend([(process,resID) for resID in \
5341 range(len(result[process]['CMS']))])
5342
5343 if options['reuse']:
5344 logFile = open(CMS_save_path(
5345 'log', result, model, options, output_path=output_path),'w')
5346
5347 lambdaCMS_list=result['lambdaCMS']
5348
5349
5350 failed_procs = []
5351
5352
5353 bar = lambda char: char*47
5354
5355
5356 if 'widths_computed' in result:
5357 res_str += '\n%s%s%s\n'%(bar('='),' Widths ',bar('='))
5358 if result['recompute_width'] == 'never':
5359 res_str += '| Widths extracted from the param_card.dat'
5360 else:
5361 res_str += '| Widths computed %s'%('analytically' if has_FRdecay
5362 else 'numerically')
5363 if result['recompute_width'] == 'first_time':
5364 res_str += ' for \\lambda = 1'
5365 elif result['recompute_width'] == 'always':
5366 res_str += ' for all \\lambda values'
5367 res_str += " using mode '--recompute_width=%s'.\n"%result['recompute_width']
5368 for particle_name, width in result['widths_computed']:
5369 res_str += '| %-10s = %-11.6gGeV\n'%('Width(%s)'%particle_name,width)
5370 res_str += '%s%s%s\n'%(bar('='),'='*8,bar('='))
5371
5372
5373
5374
5375 nstab_points=group_val
5376
5377 differences_target = {}
5378 for process, resID in checks:
5379
5380
5381 concise_repl_dict[process] = {'process':process,
5382 'asymptot':'N/A',
5383 'cms_check':'N/A',
5384 'status':'N/A'}
5385 proc_res = result[process]
5386 cms_res = proc_res['CMS'][resID]
5387 nwa_res = proc_res['NWA'][resID]
5388 resonance = resonance_str(cms_res['resonance'], latex='none')
5389 cms_born=cms_res['born']
5390 nwa_born=nwa_res['born']
5391
5392 res_str += '\n%s%s%s\n'%(bar('='),'='*8,bar('='))
5393
5394 proc_title = "%s (resonance %s)"%(process,resonance)
5395 centering = (bar(2)+8-len(proc_title))//2
5396 res_str += "%s%s\n"%(' '*centering,proc_title)
5397
5398 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5399
5400
5401 if diff_lambda_power!=1:
5402 res_str += "== WARNING diff_lambda_power is not 1 but = %g\n"%diff_lambda_power
5403 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5404
5405 born_power = guess_lambdaorder(nwa_born,lambdaCMS_list,
5406 expected=proc_res['born_order'], proc=process, res=resonance)
5407 stab_cms_born = check_stability(cms_born[-nstab_points:],
5408 lambdaCMS_list[-nstab_points:], born_power, 'CMS Born')
5409 if stab_cms_born:
5410 res_str += stab_cms_born
5411 stab_nwa_born = check_stability(nwa_born[-nstab_points:],
5412 lambdaCMS_list[-nstab_points:], born_power, 'NWA Born')
5413 if stab_nwa_born:
5414 res_str += stab_nwa_born
5415
5416 res_str += "== Kinematic configuration in GeV (E,px,pypz)\n"
5417 for i, p in enumerate(cms_res['resonance']['PS_point_used']):
5418 res_str += " | p%-2.d = "%(i+1)
5419 for pi in p:
5420 res_str += '%-24.17g'%pi if pi<0.0 else ' %-23.17g'%pi
5421 res_str += "\n"
5422
5423 res_str += "== Offshellnesses of all detected resonances\n"
5424 for res_name, offshellness in cms_res['resonance']['offshellnesses']:
5425 res_str += " | %-15s = %f\n"%(res_name, offshellness)
5426 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5427
5428 if not pert_orders:
5429 res_str += "== Born scaling lambda^n_born. nborn = %d\n"%born_power
5430 else:
5431 cms_finite=cms_res['finite']
5432 nwa_finite=nwa_res['finite']
5433 loop_power = guess_lambdaorder(nwa_finite,lambdaCMS_list,
5434 expected=proc_res['loop_order'], proc=process, res=resonance)
5435 res_str += "== Scaling lambda^n. nborn, nloop = %d, %d\n"\
5436 %(born_power,loop_power)
5437 stab_cms_finite = check_stability(cms_finite[-nstab_points:],
5438 lambdaCMS_list[-nstab_points:], loop_power, 'CMS finite')
5439 if stab_cms_finite:
5440 res_str += stab_cms_finite
5441 stab_nwa_finite = check_stability(nwa_finite[-nstab_points:],
5442 lambdaCMS_list[-nstab_points:], loop_power, 'NWA finite')
5443 if stab_nwa_finite:
5444 res_str += stab_nwa_finite
5445
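# Build the normalized curves compared in the check: at tree level the CMS
# and NWA Born results are divided by lambda**born_power, while at loop
# level CMS = (cms_finite+cms_born-nwa_born)/(lambda*nwa_born) and
# NWA = nwa_finite/(lambda*nwa_born). Their difference, further divided by
# lambda**diff_lambda_power, should tend to a constant as lambda goes to zero.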
5446 CMSData = []
5447 NWAData = []
5448 DiffData = []
5449 for idata, lam in enumerate(lambdaCMS_list):
5450 if not pert_orders:
5451 new_cms=cms_born[idata]/(lam**born_power)
5452 new_nwa=nwa_born[idata]/(lam**born_power)
5453 else:
5454 new_cms=(cms_finite[idata]+cms_born[idata]-nwa_born[idata])/(lam*nwa_born[idata])
5455 new_nwa=nwa_finite[idata]/(lam*nwa_born[idata])
5456 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5457 CMSData.append(new_cms)
5458 NWAData.append(new_nwa)
5459 DiffData.append(new_diff)
5460
5461
5462
5463
5464
5465
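# Estimate the asymptotic value of the difference as the median of the
# central 60% of the Delta values, trimming away the 20% largest and 20%
# smallest lambda points.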
5466 trim_range=int(((1.0-0.6)/2.0)*len(DiffData))
5467 low_diff_median = sorted(DiffData[trim_range:-trim_range])\
5468 [(len(DiffData)-2*trim_range)//2]
5469
5470
5471
5472
5473
5474
5475 current_median = 0
5476
5477 scan_index = 0
5478 reference = abs(sorted(NWAData)[len(NWAData)//2])
5479 if low_diff_median!= 0.0:
5480 if abs(reference/low_diff_median)<diff_zero_threshold:
5481 reference = abs(low_diff_median)
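# Scan groups of group_val consecutive points, starting from the largest
# lambda values, until the median of a group agrees with the asymptotic
# difference within consideration_threshold; the data used for the CMS
# check then starts right after that group.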
5482 while True:
5483 scanner = DiffData[scan_index:group_val+scan_index]
5484 current_median = sorted(scanner)[len(scanner)//2]
5485
5486
5487 if abs(current_median-low_diff_median)/reference<\
5488 consideration_threshold:
5489 break
5490 scan_index += 1
5491 if (group_val+scan_index)>=len(DiffData):
5492
5493
5494 logger.warning('The median scanning failed during the CMS check '+
5495 'for process %s. '%proc_title+\
5496 'This means that the difference plot has no stable '+\
5497 'intermediate region and MG5_aMC will arbitrarily consider the '+\
5498 'left half of the values.')
5499 scan_index = -1
5500 break
5501
5502 if scan_index == -1:
5503 cms_check_data_range = len(DiffData)//2
5504 else:
5505 cms_check_data_range = scan_index + group_val
5506
5507 res_str += "== Data range considered (min, max, n_val) = (%.1e, %.1e, %d)\n"\
5508 %(lambdaCMS_list[-1],lambdaCMS_list[scan_index],
5509 len(lambdaCMS_list)-scan_index)
5510
5511 CMScheck_values = DiffData[cms_check_data_range:]
5512
5513
5514
5515
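# Look for a possible numerically unstable tail: starting from the smallest
# lambda values, slide a window of used_group_val points towards larger
# lambda until it agrees with the asymptotic difference. If such a window is
# found, the points below it are inspected and a warning is issued when they
# are not all on one side of the last stable value.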
5516 if scan_index >= 0:
5517
5518 scan_index = len(CMScheck_values)
5519 used_group_val = max(3,group_val)
5520 instability_found = True
5521 while True:
5522 scanner = CMScheck_values[scan_index-used_group_val:scan_index]
5523 maxdiff = max(abs(scan-low_diff_median) for scan in scanner)
5524 if maxdiff/reference<consideration_threshold:
5525 break
5526 if (scan_index-used_group_val)==0:
5527
5528
5529 instability_found = False
5530 break
5531
5532 scan_index -= 1
5533
5534
5535 if instability_found:
5536 unstab_check=CMScheck_values[scan_index:]
5537 relative_array = [val > CMScheck_values[scan_index-1] for
5538 val in unstab_check]
5539 upper = relative_array.count(True)
5540 lower = relative_array.count(False)
5541 if not ((lower==0 and upper>=0) or (lower>=0 and upper==0)):
5542 logger.warning(
5543 """For process %s, a numerically unstable region was detected starting from lambda < %.1e.
5544 Look at the plot in this region (and possibly probe more points using the option --lambdaCMS).
5545 If this is indeed a stability issue, then either decrease MLStabThreshold in MadLoop or decrease the
5546 minimum value of lambda to be considered in the CMS check."""\
5547 %(proc_title, lambdaCMS_list[cms_check_data_range+scan_index-1]))
5548
5549
5550
5551
5552 scan_index = 0
5553 max_diff = 0.0
5554 res_str += "== Ref. value used in the ratios (Born NWA) = %s\n"\
5555 %('%.3g'%reference)
5556 res_str += "== Asymptotic difference value detected = %s\n"\
5557 %('%.3g'%low_diff_median)
5558 concise_repl_dict[process]['asymptot'] = '%.3e'%low_diff_median
5559
5560
5561 differences_target[(process,resID)]= low_diff_median
5562
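# The CMS check figure of merit is the largest relative deviation, w.r.t.
# the reference value, between the asymptotic difference and the median of
# any group of group_val consecutive points in the range considered.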
5563 while True:
5564 current_vals = CMScheck_values[scan_index:scan_index+group_val]
5565 max_diff = max(max_diff, abs(low_diff_median-
5566 sorted(current_vals)[len(current_vals)//2])/reference)
5567 if (scan_index+group_val)>=len(CMScheck_values):
5568 break
5569 scan_index += 1
5570
5571
5572 cms_check = (max_diff*100.0, '>' if max_diff>CMS_test_threshold else '<',
5573 CMS_test_threshold*100.0)
5574 res_str += "== CMS check result (threshold) = %.3g%% (%s%.3g%%)\n"%cms_check
5575 concise_repl_dict[process]['cms_check'] = \
5576 "%-10s (%s%.3g%%)"%('%.3g%%'%cms_check[0],cms_check[1],cms_check[2])
5577
5578 if max_diff>CMS_test_threshold:
5579 failed_procs.append((process,resonance))
5580 res_str += "%s %s %s\n"%(bar('='),
5581 'FAILED' if max_diff>CMS_test_threshold else 'PASSED',bar('='))
5582 concise_repl_dict[process]['status'] = 'Failed' if max_diff>CMS_test_threshold \
5583 else 'Passed'
5584
5585 if output=='concise_text':
5586
5587 max_proc_size = max(
5588 [len(process) for process in result['ordered_processes']]+[10])
5589
5590 res_str = concise_str
5591 res_str += '\n'+concise_data%(max_proc_size+4)%concise_repl_dict['Header']
5592 for process in result['ordered_processes']:
5593 res_str += (concise_data%(max_proc_size+4)%concise_repl_dict[process])
5594
5595 if len(checks):
5596 res_str += "Summary: %i/%i passed"%(len(checks)-len(failed_procs),len(checks))+\
5597 ('.\n' if not failed_procs else ', failed checks are for:\n')
5598 else:
5599 return "\nNo CMS check to perform, the process either has no diagram or does "+\
5600 "not feature any massive s-channel resonance."
5601
5602 for process, resonance in failed_procs:
5603 res_str += "> %s, %s\n"%(process, resonance)
5604
5605 if output=='concise_text':
5606 res_str += '\nMore detailed information on this check available with the command:\n'
5607 res_str += ' MG5_aMC>display checks\n'
5608
5609
5610
5611
5612 if not options['show_plot']:
5613 if options['reuse']:
5614 logFile.write(res_str)
5615 logFile.close()
5616 if output.endswith('text'):
5617 return res_str
5618 else:
5619 return failed_procs
5620
5621 fig_output_file = CMS_save_path('pdf', result, model, options,
5622 output_path=output_path)
5623 base_fig_name = fig_output_file[:-4]
5624 suffix = 1
5625 while os.path.isfile(fig_output_file):
5626 fig_output_file = '%s__%d__.pdf'%(base_fig_name,suffix)
5627 suffix+=1
5628
5629 process_data_plot_dict={}
5630
5631
5632
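# Results from previous runs can be overlaid on the plots: each additional
# entry of the --analyze option, of the form <pickle_file> or
# <pickle_file>(<title>), is loaded here and drawn alongside the current result.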
5633 all_res = [(result, None)]
5634 for i, add_res in enumerate(options['analyze'].split(',')[1:]):
5635 specs =re.match(r'^(?P<filename>.*)\((?P<title>.*)\)$', add_res)
5636 if specs:
5637 filename = specs.group('filename')
5638 title = specs.group('title')
5639 else:
5640 filename = add_res
5641 title = '#%d'%(i+1)
5642
5643 new_result = save_load_object.load_from_file(filename)
5644 if new_result is None:
5645 raise InvalidCmd('The complex mass scheme check result'+
5646 " file below could not be read.\n %s"%filename)
5647 if len(new_result['ordered_processes'])!=len(result['ordered_processes']) \
5648 or len(new_result['lambdaCMS'])!=len(result['lambdaCMS']):
5649 raise InvalidCmd('The complex mass scheme check result'+
5650 " file below does not seem compatible.\n %s"%filename)
5651 all_res.append((new_result,title))
5652
5653
5654 for process, resID in checks:
5655 data1=[]
5656 data2=[]
5657 info ={}
5658 for res in all_res:
5659 proc_res = res[0][process]
5660 cms_res = proc_res['CMS'][resID]
5661 nwa_res = proc_res['NWA'][resID]
5662 resonance = resonance_str(cms_res['resonance'])
5663 if options['resonances']!=1:
5664 info['title'] = format_title(process, resonance)
5665 else:
5666 info['title'] = format_title(process, '')
5667
5668 cms_born=cms_res['born']
5669 nwa_born=nwa_res['born']
5670 if len(cms_born) != len(lambdaCMS_list) or\
5671 len(nwa_born) != len(lambdaCMS_list):
5672 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5673 ' lambdaCMS values specified for process %s'%process)
5674 if pert_orders:
5675 cms_finite=cms_res['finite']
5676 nwa_finite=nwa_res['finite']
5677 if len(cms_finite) != len(lambdaCMS_list) or\
5678 len(nwa_finite) != len(lambdaCMS_list):
5679 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5680 ' lambdaCMS values specified for process %s'%process)
5681
5682 bpower = guess_lambdaorder(nwa_born,lambdaCMS_list,
5683 expected=proc_res['born_order'], proc=process, res=resonance)
5684
5685 CMSData = []
5686 NWAData = []
5687 DiffData = []
5688 for idata, lam in enumerate(lambdaCMS_list):
5689 if not pert_orders:
5690 new_cms = cms_born[idata]/lam**bpower
5691 new_nwa = nwa_born[idata]/lam**bpower
5692 else:
5693 new_cms=cms_finite[idata]+cms_born[idata]-nwa_born[idata]
5694 new_nwa=nwa_finite[idata]
5695 new_cms /= lam*nwa_born[idata]
5696 new_nwa /= lam*nwa_born[idata]
5697 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5698 CMSData.append(new_cms)
5699 NWAData.append(new_nwa)
5700 DiffData.append(new_diff)
5701 if res[1] is None:
5702 if not pert_orders:
5703 data1.append([r'$\displaystyle CMS\;=\;\mathcal{M}_{CMS}^{(0)}/\lambda^%d$'%bpower,CMSData])
5704 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}_{NWA}^{(0)}/\lambda^%d$'%bpower,NWAData])
5705 else:
5706 data1.append([r'$\displaystyle CMS\;=\;(\mathcal{M}^{(1)}_{CMS}+\mathcal{M}_{CMS}^{(0)}-\mathcal{M}^{(0)}_{NWA})/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',CMSData])
5707 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}^{(1)}_{NWA}/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',NWAData])
5708 data2.append([r'$\displaystyle\Delta\;=\;(CMS-NWA)/\lambda%s$'\
5709 %('' if diff_lambda_power==1 else r'^{%g}'%diff_lambda_power)
5710 ,DiffData])
5711 data2.append([r'Detected asymptot',[differences_target[(process,resID)]
5712 for i in range(len(lambdaCMS_list))]])
5713 else:
5714 data1.append([r'$\displaystyle CMS$ %s'%res[1].replace('_',' '),CMSData])
5715 data1.append([r'$\displaystyle NWA$ %s'%res[1].replace('_',' '),NWAData])
5716 data2.append([r'$\displaystyle\Delta$ %s'%res[1].replace('_',' '),DiffData])
5717
5718 process_data_plot_dict[(process,resID)]=(data1,data2, info)
5719
5720
5721 try:
5722 import matplotlib.pyplot as plt
5723 from matplotlib.backends.backend_pdf import PdfPages
5724 logger.info('Rendering plots... (this can take some time because of the latex labels)')
5725
5726 res_str += \
5727 """\n-----------------------------------------------------------------------------------------------
5728 | In the plots, the Complex Mass Scheme check is successful if the normalized difference |
5729 | between the CMS and NWA result (lower inset) tends to a constant when \\lambda goes to zero. |
5730 -----------------------------------------------------------------------------------------------\n"""
5731
5732
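# lambdaCMS_list is ordered from the largest to the smallest lambda, so the
# upper bound of the requested plotting range maps onto the first index to
# keep and the lower bound onto the last one.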
5733 if lambda_range[1]>0:
5734 min_lambda_index = -1
5735 for i, lam in enumerate(lambdaCMS_list):
5736 if lam<=lambda_range[1]:
5737 min_lambda_index = i
5738 break
5739 else:
5740 min_lambda_index = 0
5741 if lambda_range[0]>0:
5742 max_lambda_index = -1
5743 for i, lam in enumerate(lambdaCMS_list):
5744 if lam<=lambda_range[0]:
5745 max_lambda_index=i-1
5746 break
5747 else:
5748 max_lambda_index=len(lambdaCMS_list)-1
5749
5750 if max_lambda_index==-1 or min_lambda_index==-1 or \
5751 min_lambda_index==max_lambda_index:
5752 raise InvalidCmd('Invalid lambda plotting range: (%.1e,%.1e)'%\
5753 (lambda_range[0],lambda_range[1]))
5754
5755 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5756 lambdaCMS_list = lambdaCMS_list[min_lambda_index:max_lambda_index+1]
5757
5758 plt.rc('text', usetex=True)
5759 plt.rc('font', family='serif')
5760 pp=PdfPages(fig_output_file)
5761 if len(checks)==0 or len(process_data_plot_dict[checks[0]][1])<=7:
5762 colorlist=['b','r','g','k','c','m','y']
5763 else:
5764 import matplotlib.colors as colors
5765 import matplotlib.cm as mplcm
5767
5768
5769 cm = plt.get_cmap('gist_rainbow')
5770 cNorm = colors.Normalize(vmin=0, vmax=(len(data2)-1))
5771 scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
5772
5773 colorlist = [scalarMap.to_rgba(i*0.9) for i in range(len(data2))]
5774
5775
5776
5777
5778
5779
5780 legend_size = 10
5781 for iproc, (process, resID) in enumerate(checks):
5782 data1,data2, info=process_data_plot_dict[(process,resID)]
5783
5784 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5785 for i in range(len(data1)):
5786 data1[i][1]=data1[i][1][min_lambda_index:max_lambda_index+1]
5787 for i in range(len(data2)):
5788 data2[i][1]=data2[i][1][min_lambda_index:max_lambda_index+1]
5789 plt.figure(iproc+1)
5790 plt.subplot(211)
5791 minvalue=1e+99
5792 maxvalue=-1e+99
5793 for i, d1 in enumerate(data1):
5794
5795 color=colorlist[i//2]
5796 data_plot=d1[1]
5797 minvalue=min(min(data_plot),minvalue)
5798 maxvalue=max(max(data_plot),maxvalue)
5799 plt.plot(lambdaCMS_list, data_plot, color=color, marker='', \
5800 linestyle=('-' if i%2==0 else '--'),
5801 label=(d1[0] if (i%2==0 or i==1) else '_nolegend_'))
5802 ymin = minvalue-(maxvalue-minvalue)/5.
5803 ymax = maxvalue+(maxvalue-minvalue)/5.
5804
5805 plt.yscale('linear')
5806 plt.xscale('log')
5807 plt.title(info['title'],fontsize=12,y=1.08)
5808 plt.ylabel(r'$\displaystyle \mathcal{M}$')
5809
5810 if ymax*len(data1)-sum(max(d1[1][-len(d1[1])//2:]) \
5811 for d1 in data1) > 0.5*(ymax-ymin)*len(data1):
5812 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5813 else:
5814 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5815
5816 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list), ymin, ymax])
5817
5818 plt.subplot(212)
5819 minvalue=1e+99
5820 maxvalue=-1e+99
5821
5822 try:
5823 asymptot_index = [d2[0] for d2 in data2].index('Detected asymptot')
5824 plt.plot(lambdaCMS_list, data2[asymptot_index][1],
5825 color='0.75', marker='', linestyle='-', label='')
5826 except ValueError:
5827 pass
5828
5829 color_ID = -1
5830 for d2 in data2:
5831
5832 if d2[0]=='Detected asymptot':
5833 continue
5834 color_ID += 1
5835 color=colorlist[color_ID]
5836 data_plot=d2[1]
5837 minvalue=min(min(data_plot),minvalue)
5838 maxvalue=max(max(data_plot),maxvalue)
5839 plt.plot(lambdaCMS_list, data_plot, color=color, marker='',\
5840 linestyle='-', label=d2[0])
5841 ymin = minvalue-(maxvalue-minvalue)/5.
5842 ymax = maxvalue+(maxvalue-minvalue)/5.
5843
5844 plt.yscale('linear')
5845 plt.xscale('log')
5846 plt.ylabel(r'$\displaystyle \Delta$')
5847 plt.xlabel(r'$\displaystyle \lambda$')
5848
5849
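# Pick the legend corner of the lower inset from where the curves are most
# stable: compare the spread of the small-lambda half of each curve to that
# of the large-lambda half and place the legend on the flatter side, in the
# vertically emptier corner.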
5850 sd = [sorted(d2[1][-len(d2[1])//2:]) for d2 in data2]
5851 left_stability = sum(abs(s[0]-s[-1]) for s in sd)
5852 sd = [sorted(d2[1][:-len(d2[1])//2]) for d2 in data2]
5853 right_stability = sum(abs(s[0]-s[-1]) for s in sd)
5854 left_stable = False if right_stability==0.0 else \
5855 (left_stability/right_stability)<0.1
5856
5857 if left_stable:
5858 if ymax*len(data2)-sum(max(d2[1][-len(d2[1])//2:]) \
5859 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5860 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5861 else:
5862 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5863 else:
5864 if ymax*len(data2)-sum(max(d2[1][:-len(d2[1])//2]) \
5865 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5866 plt.legend(prop={'size':legend_size},loc='upper right', frameon=False)
5867 else:
5868 plt.legend(prop={'size':legend_size},loc='lower right', frameon=False)
5869
5870 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list),\
5871 minvalue-(maxvalue-minvalue)/5., maxvalue+(maxvalue-minvalue)/5.])
5872
5873 plt.savefig(pp,format='pdf')
5874
5875 pp.close()
5876
5877 if len(checks)>0:
5878 logger.info('Complex Mass Scheme check plot output to file %s. '%fig_output_file)
5879
5880 if sys.platform.startswith('linux'):
5881 misc.call(["xdg-open", fig_output_file])
5882 elif sys.platform.startswith('darwin'):
5883 misc.call(["open", fig_output_file])
5884
5885 plt.close("all")
5886
5887 except Exception as e:
5888 if isinstance(e, ImportError):
5889 res_str += "\n= Install matplotlib to get a "+\
5890 "graphical display of the results of the cms check."
5891 else:
5892 general_error = "\n= Could not produce the cms check plot because of "+\
5893 "the following error: %s"%str(e)
5894 try:
5895 import Tkinter
5896 if isinstance(e, Tkinter.TclError):
5897 res_str += "\n= Plots are not generated because your system"+\
5898 " does not support graphical display."
5899 else:
5900 res_str += general_error
5901 except:
5902 res_str += general_error
5903
5904 if options['reuse']:
5905 logFile.write(res_str)
5906 logFile.close()
5907
5908 if output.endswith('text'):
5909 return res_str
5910 else:
5911 return failed_procs
5912