"""Several different checks for processes (and hence models):
permutation tests, gauge invariance tests, Lorentz invariance
tests. Also a class for evaluation of Python matrix elements,
MatrixElementEvaluator."""

from __future__ import division

import array
import copy
import fractions
import itertools
import logging
import math
import os
import sys
import re
import shutil
import random
import glob
import subprocess
import time
import datetime
import errno


import aloha
import aloha.aloha_writers as aloha_writers
import aloha.create_aloha as create_aloha

import madgraph.iolibs.export_python as export_python
import madgraph.iolibs.helas_call_writers as helas_call_writers
import models.import_ufo as import_ufo
import madgraph.iolibs.save_load_object as save_load_object

import madgraph.core.base_objects as base_objects
import madgraph.core.color_algebra as color
import madgraph.core.color_amp as color_amp
import madgraph.core.helas_objects as helas_objects
import madgraph.core.diagram_generation as diagram_generation

import madgraph.various.rambo as rambo
import madgraph.various.misc as misc
import madgraph.various.progressbar as pbar

import madgraph.loop.loop_diagram_generation as loop_diagram_generation
import madgraph.loop.loop_helas_objects as loop_helas_objects
import madgraph.loop.loop_base_objects as loop_base_objects

from madgraph import MG5DIR, InvalidCmd, MadGraph5Error

from madgraph.iolibs.files import cp

import models.model_reader as model_reader
import aloha.template_files.wavefunctions as wavefunctions
from aloha.template_files.wavefunctions import \
     ixxxxx, oxxxxx, vxxxxx, sxxxxx, txxxxx, irxxxx, orxxxx

ADDED_GLOBAL = []

temp_dir_prefix = "TMP_CHECK"

pjoin = os.path.join
# Helper purging names previously added to globals() (the helper name is an
# assumption; its body below defines its role).
def clean_added_globals(to_clean):
    for value in list(to_clean):
        del globals()[value]
        to_clean.remove(value)

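# Example sketch (names 'model' and 'me' are placeholders): after a check has
# exec'd ALOHA routines and Matrix_<proc> classes into globals() and recorded
# them in ADDED_GLOBAL, the module namespace can be reset with:
#
#     evaluator = MatrixElementEvaluator(model)
#     evaluator.evaluate_matrix_element(me)     # populates ADDED_GLOBAL
#     clean_added_globals(ADDED_GLOBAL)         # drops the injected names again
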
class ProcessTimer(object):
    """Small helper timing an external command and tracking its memory use
    (class and method headers follow the ProcessTimer usage in make_and_run)."""
    def __init__(self, *args, **opts):
        self.cmd_args = args
        self.cmd_opts = opts
        self.execution_state = False

    def execute(self):
        self.max_vms_memory = 0
        self.max_rss_memory = 0

        self.t1 = None
        self.t0 = time.time()
        self.p = subprocess.Popen(*self.cmd_args, **self.cmd_opts)
        self.execution_state = True

    def poll(self):
        if not self.check_execution_state():
            return False

        self.t1 = time.time()
108 flash = subprocess.Popen("ps -p %i -o rss"%self.p.pid,
109 shell=True,stdout=subprocess.PIPE)
110 stdout_list = flash.communicate()[0].split('\n')
111 rss_memory = int(stdout_list[1])
112
113 vms_memory = 0
114
        self.max_vms_memory = max(self.max_vms_memory, vms_memory)
        self.max_rss_memory = max(self.max_rss_memory, rss_memory)

        return self.check_execution_state()

    def is_running(self):
        return self.p.poll() is None

    def check_execution_state(self):
        if not self.execution_state:
            return False
        if self.is_running():
            return True
        self.execution_state = False
        self.t1 = time.time()
        return False
160
    def close(self, kill=False):

        if self.p.poll() is None:
            if kill:
                self.p.kill()
            else:
                self.p.terminate()
168
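# Example sketch of how ProcessTimer is driven (this mirrors make_and_run
# further down; 'some_dir' is a placeholder):
#
#     timer = ProcessTimer(['./check'], cwd='some_dir', shell=False)
#     timer.execute()
#     while timer.poll():
#         time.sleep(.2)
#     timer.close()
#     # timer.max_rss_memory and (timer.t1 - timer.t0) then hold RAM and runtime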
169
170
171
172
173
174
175
176
177
178
179
180
181
class FakeInterface(object):
    """ Just an 'option container' to mimic the interface which is passed to the
    tests. We put in only what is currently used from the interface by the tests:
       cmd.options['fortran_compiler']
       cmd.options['complex_mass_scheme']
       cmd._mgme_dir"""
    def __init__(self, mgme_dir = "", complex_mass_scheme = False,
                 fortran_compiler = 'gfortran' ):
        self._mgme_dir = mgme_dir
        self.options = {}
        self.options['complex_mass_scheme'] = complex_mass_scheme
        self.options['fortran_compiler'] = fortran_compiler
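
# Example sketch: the checks can be driven without the full MG5 command
# interface by passing a FakeInterface instead ('/path/to/MG5' and 'model' are
# placeholders):
#
#     fake_cmd = FakeInterface(mgme_dir='/path/to/MG5', complex_mass_scheme=False)
#     evaluator = MatrixElementEvaluator(model, cmd=fake_cmd)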
194
195
196
197
198
logger = logging.getLogger('madgraph.various.process_checks')


def boost_momenta(p, boost_direction=1, beta=0.5):
    # The default argument values are assumed; the calls further down use
    # boost_momenta(p, 3) and boost_momenta(p, 3, beta).
    """Boost the set of momenta p along the 'boost_direction' axis by the
    'beta' factor"""
206
207 boost_p = []
208 gamma = 1/ math.sqrt(1 - beta**2)
209 for imp in p:
210 bosst_p = imp[boost_direction]
211 E, px, py, pz = imp
212 boost_imp = []
213
214 boost_imp.append(gamma * E - gamma * beta * bosst_p)
215
216 if boost_direction == 1:
217 boost_imp.append(-gamma * beta * E + gamma * px)
218 else:
219 boost_imp.append(px)
220
221 if boost_direction == 2:
222 boost_imp.append(-gamma * beta * E + gamma * py)
223 else:
224 boost_imp.append(py)
225
226 if boost_direction == 3:
227 boost_imp.append(-gamma * beta * E + gamma * pz)
228 else:
229 boost_imp.append(pz)
230
231 boost_p.append(boost_imp)
232
233 return boost_p
234
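# Example sketch: boosting a back-to-back massless pair along z with beta=0.5
# leaves the pair invariant mass unchanged:
#
#     p = [[500., 0., 0., 500.], [500., 0., 0., -500.]]
#     boosted = boost_momenta(p, boost_direction=3, beta=0.5)
#     shat = (boosted[0][0]+boosted[1][0])**2 - (boosted[0][3]+boosted[1][3])**2
#     # shat is still 1000.**2 up to floating-point rounding
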
class MatrixElementEvaluator(object):
    """Class taking care of matrix element evaluation, storing
    relevant quantities for speedup."""

    def __init__(self, model, param_card = None,
                 auth_skipping = False, reuse = True, cmd = FakeInterface()):
244 """Initialize object with stored_quantities, helas_writer,
245 model, etc.
246 auth_skipping = True means that any identical matrix element will be
247 evaluated only once
248 reuse = True means that the matrix element corresponding to a
249 given process can be reused (turn off if you are using
250 different models for the same process)"""
251
252 self.cmd = cmd
253
254
255 self.helas_writer = helas_call_writers.PythonUFOHelasCallWriter(model)
256
257
258 self.full_model = model_reader.ModelReader(model)
259 try:
260 self.full_model.set_parameters_and_couplings(param_card)
261 except MadGraph5Error:
262 if isinstance(param_card, (str,file)):
263 raise
            logger.warning('The param_card present in the event file is not compatible; the default one will be used instead.')
265 self.full_model.set_parameters_and_couplings()
266
267 self.auth_skipping = auth_skipping
268 self.reuse = reuse
269 self.cmass_scheme = cmd.options['complex_mass_scheme']
270 self.store_aloha = []
271 self.stored_quantities = {}
272
273
274
275
    def evaluate_matrix_element(self, matrix_element, p=None, full_model=None,
                                gauge_check=False, auth_skipping=None, output='m2',
                                options=None):
279 """Calculate the matrix element and evaluate it for a phase space point
280 output is either m2, amp, jamp
281 """
282
283 if full_model:
284 self.full_model = full_model
285 process = matrix_element.get('processes')[0]
286 model = process.get('model')
287
288 if "matrix_elements" not in self.stored_quantities:
289 self.stored_quantities['matrix_elements'] = []
290 matrix_methods = {}
291
292 if self.reuse and "Matrix_%s" % process.shell_string() in globals() and p:
293
294 matrix = eval("Matrix_%s()" % process.shell_string())
295 me_value = matrix.smatrix(p, self.full_model)
296 if output == "m2":
297 return matrix.smatrix(p, self.full_model), matrix.amp2
298 else:
299 m2 = matrix.smatrix(p, self.full_model)
300 return {'m2': m2, output:getattr(matrix, output)}
301 if (auth_skipping or self.auth_skipping) and matrix_element in \
302 self.stored_quantities['matrix_elements']:
303
304 logger.info("Skipping %s, " % process.nice_string() + \
305 "identical matrix element already tested" \
306 )
307 return None
308
309 self.stored_quantities['matrix_elements'].append(matrix_element)
310
311
312
313
314 if "list_colorize" not in self.stored_quantities:
315 self.stored_quantities["list_colorize"] = []
316 if "list_color_basis" not in self.stored_quantities:
317 self.stored_quantities["list_color_basis"] = []
318 if "list_color_matrices" not in self.stored_quantities:
319 self.stored_quantities["list_color_matrices"] = []
320
321 col_basis = color_amp.ColorBasis()
322 new_amp = matrix_element.get_base_amplitude()
323 matrix_element.set('base_amplitude', new_amp)
324 colorize_obj = col_basis.create_color_dict_list(new_amp)
325
326 try:
327
328
329
330 col_index = self.stored_quantities["list_colorize"].index(colorize_obj)
331 except ValueError:
332
333
334 self.stored_quantities['list_colorize'].append(colorize_obj)
335 col_basis.build()
336 self.stored_quantities['list_color_basis'].append(col_basis)
337 col_matrix = color_amp.ColorMatrix(col_basis)
338 self.stored_quantities['list_color_matrices'].append(col_matrix)
339 col_index = -1
340
341
342 matrix_element.set('color_basis',
343 self.stored_quantities['list_color_basis'][col_index])
344 matrix_element.set('color_matrix',
345 self.stored_quantities['list_color_matrices'][col_index])
346
347
348 if "used_lorentz" not in self.stored_quantities:
349 self.stored_quantities["used_lorentz"] = []
350
351 me_used_lorentz = set(matrix_element.get_used_lorentz())
352 me_used_lorentz = [lorentz for lorentz in me_used_lorentz \
353 if lorentz not in self.store_aloha]
354
355 aloha_model = create_aloha.AbstractALOHAModel(model.get('name'))
356 aloha_model.add_Lorentz_object(model.get('lorentz'))
357 aloha_model.compute_subset(me_used_lorentz)
358
359
360 aloha_routines = []
361 for routine in aloha_model.values():
362 aloha_routines.append(routine.write(output_dir = None,
363 mode='mg5',
364 language = 'Python'))
365 for routine in aloha_model.external_routines:
366 aloha_routines.append(
367 open(aloha_model.locate_external(routine, 'Python')).read())
368
369
370 previous_globals = list(globals().keys())
371 for routine in aloha_routines:
372 exec(routine, globals())
373 for key in globals().keys():
374 if key not in previous_globals:
375 ADDED_GLOBAL.append(key)
376
377
378 self.store_aloha.extend(me_used_lorentz)
379
380 exporter = export_python.ProcessExporterPython(matrix_element,
381 self.helas_writer)
382
383 try:
384 matrix_methods = exporter.get_python_matrix_methods(\
385 gauge_check=gauge_check)
386
387 except helas_call_writers.HelasWriterError, error:
388 logger.info(error)
389 return None
390
391
392
393 if self.reuse:
394
395 exec(matrix_methods[process.shell_string()], globals())
396 ADDED_GLOBAL.append('Matrix_%s' % process.shell_string())
397 else:
398
399 exec(matrix_methods[process.shell_string()])
400
401 if not p:
402 p, w_rambo = self.get_momenta(process, options)
403
404 exec("data = Matrix_%s()" % process.shell_string())
405 if output == "m2":
406 return data.smatrix(p, self.full_model), data.amp2
407 else:
408 m2 = data.smatrix(p,self.full_model)
409 return {'m2': m2, output:getattr(data, output)}
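
    # Example sketch (to be run from a driver script; 'model' and 'me' are
    # placeholders for a loaded model and a HelasMatrixElement):
    #
    #     evaluator = MatrixElementEvaluator(model, cmd=FakeInterface())
    #     value, amp2 = evaluator.evaluate_matrix_element(me, output='m2')
    #     res = evaluator.evaluate_matrix_element(me, output='jamp')
    #     # res is then a dictionary {'m2': ..., 'jamp': ...}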
410
411
412

    def get_momenta(self, process, options=None):
        """Get a point in phase space for the external states in the given
        process, with the CM energy given. The incoming particles are
        assumed to be oriented along the z axis, with particle 1 along the
        positive z axis."""
419
420 if not options:
421 energy=1000
422 events=None
423 else:
424 energy = options['energy']
425 events = options['events']
426 to_skip = 0
427
428 if not (isinstance(process, base_objects.Process) and \
429 isinstance(energy, (float,int))):
430 raise rambo.RAMBOError, "Not correct type for arguments to get_momenta"
431
432
433 sorted_legs = sorted(process.get('legs'), lambda l1, l2:\
434 l1.get('number') - l2.get('number'))
435
436
437 if events:
438 ids = [l.get('id') for l in sorted_legs]
439 import MadSpin.decay as madspin
440 if not hasattr(self, 'event_file'):
441 fsock = open(events)
442 self.event_file = madspin.Event(fsock)
443
444 skip = 0
445 while self.event_file.get_next_event() != 'no_event':
446 event = self.event_file.particle
447
448 event_ids = [p['pid'] for p in event.values()]
449 if event_ids == ids:
450 skip += 1
451 if skip > to_skip:
452 break
453 else:
454 raise MadGraph5Error, 'No compatible events for %s' % ids
455 p = []
456 for part in event.values():
457 m = part['momentum']
458 p.append([m.E, m.px, m.py, m.pz])
459 return p, 1
460
461 nincoming = len([leg for leg in sorted_legs if leg.get('state') == False])
462 nfinal = len(sorted_legs) - nincoming
463
464
465 mass_strings = [self.full_model.get_particle(l.get('id')).get('mass') \
466 for l in sorted_legs]
467 mass = [self.full_model.get('parameter_dict')[m] for m in mass_strings]
468 mass = [m.real for m in mass]
469
470
471
472
473
474 energy = max(energy, sum(mass[:nincoming]) + 200.,
475 sum(mass[nincoming:]) + 200.)
476
477 if nfinal == 1:
478 p = []
479 energy = mass[-1]
480 p.append([energy/2,0,0,energy/2])
481 p.append([energy/2,0,0,-energy/2])
482 p.append([mass[-1],0,0,0])
483 return p, 1.0
484
485 e2 = energy**2
486 m1 = mass[0]
487 p = []
488
489 masses = rambo.FortranList(nfinal)
490 for i in range(nfinal):
491 masses[i+1] = mass[nincoming + i]
492
493 if nincoming == 1:
494
495
496 p.append([abs(m1), 0., 0., 0.])
497
498 p_rambo, w_rambo = rambo.RAMBO(nfinal, abs(m1), masses)
499
500
501 for i in range(1, nfinal+1):
502 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
503 p_rambo[(2,i)], p_rambo[(3,i)]]
504 p.append(momi)
505
506 return p, w_rambo
507
508 if nincoming != 2:
509 raise rambo.RAMBOError('Need 1 or 2 incoming particles')
510
511 if nfinal == 1:
512 energy = masses[1]
513 if masses[1] == 0.0:
                raise rambo.RAMBOError('The 2 > 1 kinematics with a massless'+\
                                       ' final-state particle is invalid')
516
517 e2 = energy**2
518 m2 = mass[1]
519
520 mom = math.sqrt((e2**2 - 2*e2*m1**2 + m1**4 - 2*e2*m2**2 - \
521 2*m1**2*m2**2 + m2**4) / (4*e2))
522 e1 = math.sqrt(mom**2+m1**2)
523 e2 = math.sqrt(mom**2+m2**2)
524
525 p.append([e1, 0., 0., mom])
526 p.append([e2, 0., 0., -mom])
527
528 if nfinal == 1:
529 p.append([energy, 0., 0., 0.])
530 return p, 1.
531
532 p_rambo, w_rambo = rambo.RAMBO(nfinal, energy, masses)
533
534
535 for i in range(1, nfinal+1):
536 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
537 p_rambo[(2,i)], p_rambo[(3,i)]]
538 p.append(momi)
539
540 return p, w_rambo
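
    # Example sketch: generate a 1000 GeV phase-space point for a process
    # ('process' is a placeholder; the options dict mirrors what the check
    # commands pass in):
    #
    #     p, wgt = evaluator.get_momenta(process,
    #                                    options={'energy': 1000., 'events': None})
    #     # p is a list of [E, px, py, pz] for every external leg and wgt the
    #     # associated RAMBO weight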
541
class LoopMatrixElementEvaluator(MatrixElementEvaluator):
    """Class taking care of matrix element evaluation for loop processes."""

    def __init__(self, cuttools_dir=None, output_path=None, tir_dir={},
                 cmd=FakeInterface(), *args, **kwargs):
550 """Allow for initializing the MG5 root where the temporary fortran
551 output for checks is placed."""
552
553 super(LoopMatrixElementEvaluator,self).__init__(*args,cmd=cmd,**kwargs)
554
555 self.mg_root=self.cmd._mgme_dir
556
557 if output_path is None:
558 self.output_path = self.cmd._mgme_dir
559 else:
560 self.output_path = output_path
561
562 self.cuttools_dir=cuttools_dir
563 self.tir_dir=tir_dir
564 self.loop_optimized_output = cmd.options['loop_optimized_output']
565
566
567 self.proliferate=True
568
569
570
571
    def evaluate_matrix_element(self, matrix_element, p=None, options=None,
                                gauge_check=False, auth_skipping=None, output='m2',
                                PS_name = None, MLOptions={}):
        """Calculate the matrix element and evaluate it for a phase space point.
        Output can only be 'm2'. The 'jamp' and 'amp' returned values are just
        empty lists at this point.
        If PS_name is not None the written out PS.input will be saved in
        the file PS.input_<PS_name> as well."""
580
581 process = matrix_element.get('processes')[0]
582 model = process.get('model')
583
584 if options and 'split_orders' in options.keys():
585 split_orders = options['split_orders']
586 else:
587 split_orders = -1
588
589 if "loop_matrix_elements" not in self.stored_quantities:
590 self.stored_quantities['loop_matrix_elements'] = []
591
592 if (auth_skipping or self.auth_skipping) and matrix_element in \
593 [el[0] for el in self.stored_quantities['loop_matrix_elements']]:
594
595 logger.info("Skipping %s, " % process.nice_string() + \
596 "identical matrix element already tested" )
597 return None
598
599
600 if not p:
601 p, w_rambo = self.get_momenta(process, options=options)
602
603 if matrix_element in [el[0] for el in \
604 self.stored_quantities['loop_matrix_elements']]:
605 export_dir=self.stored_quantities['loop_matrix_elements'][\
606 [el[0] for el in self.stored_quantities['loop_matrix_elements']\
607 ].index(matrix_element)][1]
608 logger.debug("Reusing generated output %s"%str(export_dir))
609 else:
610 export_dir=pjoin(self.output_path,temp_dir_prefix)
611 if os.path.isdir(export_dir):
612 if not self.proliferate:
                    raise InvalidCmd("The directory %s already exists. Please remove it."%str(export_dir))
614 else:
615 id=1
616 while os.path.isdir(pjoin(self.output_path,\
617 '%s_%i'%(temp_dir_prefix,id))):
618 id+=1
619 export_dir=pjoin(self.output_path,'%s_%i'%(temp_dir_prefix,id))
620
621 if self.proliferate:
622 self.stored_quantities['loop_matrix_elements'].append(\
623 (matrix_element,export_dir))
624
625
626
627 import madgraph.loop.loop_exporters as loop_exporters
628 if self.loop_optimized_output:
629 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
630 else:
631 exporter_class=loop_exporters.LoopProcessExporterFortranSA
632
633 MLoptions = {'clean': True,
634 'complex_mass': self.cmass_scheme,
635 'export_format':'madloop',
636 'mp':True,
637 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
638 'cuttools_dir': self.cuttools_dir,
639 'fortran_compiler': self.cmd.options['fortran_compiler'],
640 'output_dependencies': self.cmd.options['output_dependencies']}
641
642 MLoptions.update(self.tir_dir)
643
644 FortranExporter = exporter_class(\
645 self.mg_root, export_dir, MLoptions)
646 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
647 FortranExporter.copy_v4template(modelname=model.get('name'))
648 FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
649 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
650 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
651 for c in l]))
652 FortranExporter.convert_model_to_mg4(model,wanted_lorentz,wanted_couplings)
653 FortranExporter.finalize_v4_directory(None,"",False,False,'gfortran')
654
655 self.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
656 split_orders=split_orders)
657
658 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
659 mp = gauge_check and self.loop_optimized_output, MLOptions=MLOptions)
660
661 if gauge_check:
662 file_path, orig_file_content, new_file_content = \
663 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
664 ['helas_calls_ampb_1.f','loop_matrix.f'])
665 file = open(file_path,'w')
666 file.write(new_file_content)
667 file.close()
668 if self.loop_optimized_output:
669 mp_file_path, mp_orig_file_content, mp_new_file_content = \
670 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
671 ['mp_helas_calls_ampb_1.f','mp_compute_loop_coefs.f'],mp=True)
672 mp_file = open(mp_file_path,'w')
673 mp_file.write(mp_new_file_content)
674 mp_file.close()
675
676
677 finite_m2 = self.get_me_value(process.shell_string_v4(), 0,\
678 export_dir, p, PS_name = PS_name, verbose=False)[0][0]
679
680
681 if gauge_check:
682 file = open(file_path,'w')
683 file.write(orig_file_content)
684 file.close()
685 if self.loop_optimized_output:
686 mp_file = open(mp_file_path,'w')
687 mp_file.write(mp_orig_file_content)
688 mp_file.close()
689
690
691 if not self.proliferate:
692 shutil.rmtree(export_dir)
693
694 if output == "m2":
695
696
697 return finite_m2, []
698 else:
699 return {'m2': finite_m2, output:[]}
700
    def fix_MadLoopParamCard(self, dir_name, mp=False, loop_filter=False,
                             DoubleCheckHelicityFilter=False, MLOptions={}):
        """ Set parameters in MadLoopParams.dat suited for these checks. MP
        stands for multiple precision and can either be a bool or an integer
        to specify the mode."""
706
707 if isinstance(mp,bool):
708 mode = 4 if mp else 1
709 else:
710 mode = mp
711
712 file = open(pjoin(dir_name,'MadLoopParams.dat'), 'r')
713 MLParams = file.read()
714 file.close()
715
716 for key in MLOptions.keys():
717 if key == "ImprovePS":
718 MLParams = re.sub(r"#ImprovePS\n\S+","#ImprovePS\n%s"%(\
719 '.TRUE.' if MLOptions[key] else '.FALSE.'),MLParams)
720 elif key == "ForceMP":
721 if MLOptions[key]:
722 mode = 4
723 elif key == "MLReductionLib":
724 value=MLOptions[key]
725 if isinstance(value,list):
726 if value:
727 mlred="|".join([str(vl) for vl in value])
728 else:
729 mlred="1"
730 MLParams = re.sub(r"#MLReductionLib\n\S+","#MLReductionLib\n%s"\
731 %(mlred),MLParams)
732 else:
733 MLParams = re.sub(r"#MLReductionLib\n\S+","#MLReductionLib\n%d"\
734 %(MLOptions[key] if MLOptions[key] else 1),MLParams)
735 else:
736 logger.error("Key %s is not a valid MadLoop option."%key)
737
738
739 MLParams = re.sub(r"#CTModeRun\n-?\d+","#CTModeRun\n%d"%mode, MLParams)
740 MLParams = re.sub(r"#CTModeInit\n-?\d+","#CTModeInit\n%d"%mode, MLParams)
741 MLParams = re.sub(r"#UseLoopFilter\n\S+","#UseLoopFilter\n%s"%(\
742 '.TRUE.' if loop_filter else '.FALSE.'),MLParams)
743 MLParams = re.sub(r"#DoubleCheckHelicityFilter\n\S+",
744 "#DoubleCheckHelicityFilter\n%s"%('.TRUE.' if
745 DoubleCheckHelicityFilter else '.FALSE.'),MLParams)
746
747
748 file = open(pjoin(dir_name,'MadLoopParams.dat'), 'w')
749 file.write(MLParams)
750 file.close()
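
    # Example sketch of the MLOptions dictionary understood by this method
    # (keys as handled above; 'export_dir' is a placeholder):
    #
    #     MLOptions = {'ImprovePS': True,
    #                  'ForceMP': False,
    #                  'MLReductionLib': [1, 3]}
    #     evaluator.fix_MadLoopParamCard(pjoin(export_dir, 'Cards'),
    #                                    mp=False, MLOptions=MLOptions)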
751
752 @classmethod
    def fix_PSPoint_in_check(cls, dir_path, read_ps = True, npoints = 1,
                             hel_config = -1, mu_r=0.0, split_orders=-1):
        """Set check_sa.f to read PS.input, assuming a working dir dir_path.
        If hel_config is different than -1 then check_sa.f is configured so as
        to evaluate only the specified helicity.
        If mu_r > 0.0, the renormalization scale value will be hardcoded
        directly in check_sa.f; if it is 0.0 it will be set to Sqrt(s) and if it
        is < 0.0 the value in the param_card.dat is used.
        If the split_orders target (i.e. the target squared coupling orders for
        the computation) is != -1, it will be changed in check_sa.f via the
        subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders)."""
764
765 file_path = dir_path
766 if not os.path.isfile(dir_path) or \
767 not os.path.basename(dir_path)=='check_sa.f':
768 file_path = pjoin(dir_path,'check_sa.f')
769 if not os.path.isfile(file_path):
770 directories = [d for d in glob.glob(pjoin(dir_path,'P*_*')) \
771 if (re.search(r'.*P\d+_\w*$', d) and os.path.isdir(d))]
772 if len(directories)>0:
773 file_path = pjoin(directories[0],'check_sa.f')
774 if not os.path.isfile(file_path):
775 raise MadGraph5Error('Could not find the location of check_sa.f'+\
776 ' from the specified path %s.'%str(file_path))
777
778 file = open(file_path, 'r')
779 check_sa = file.read()
780 file.close()
781
782 file = open(file_path, 'w')
783 check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' if read_ps \
784 else '.FALSE.'), check_sa)
785 check_sa = re.sub(r"NPSPOINTS = \d+","NPSPOINTS = %d"%npoints, check_sa)
786 if hel_config != -1:
787 check_sa = re.sub(r"SLOOPMATRIX\S+\(\S+,MATELEM,",
788 "SLOOPMATRIXHEL_THRES(P,%d,MATELEM,"%hel_config, check_sa)
789 else:
790 check_sa = re.sub(r"SLOOPMATRIX\S+\(\S+,MATELEM,",
791 "SLOOPMATRIX_THRES(P,MATELEM,",check_sa)
792 if mu_r > 0.0:
793 check_sa = re.sub(r"MU_R=SQRTS","MU_R=%s"%\
794 (("%.17e"%mu_r).replace('e','d')),check_sa)
795 elif mu_r < 0.0:
796 check_sa = re.sub(r"MU_R=SQRTS","",check_sa)
797
798 if split_orders > 0:
799 check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)",
800 "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa)
801
802 file.write(check_sa)
803 file.close()
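
    # Example sketch: configure check_sa.f to read its PS point from PS.input
    # and evaluate a single point summed over helicities ('export_dir' is a
    # placeholder):
    #
    #     LoopMatrixElementEvaluator.fix_PSPoint_in_check(
    #         pjoin(export_dir, 'SubProcesses'), read_ps=True, npoints=1)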
804
    def get_me_value(self, proc, proc_id, working_dir, PSpoint=[], \
                     PS_name = None, verbose=True):
        """Compile and run ./check, then parse the output and return the result
        for process with id = proc_id and PSpoint if specified.
        If PS_name is not None the written out PS.input will be saved in
        the file PS.input_<PS_name> as well."""
811 if verbose:
812 sys.stdout.write('.')
813 sys.stdout.flush()
814
815 shell_name = None
816 directories = glob.glob(pjoin(working_dir, 'SubProcesses',
817 'P%i_*' % proc_id))
818 if directories and os.path.isdir(directories[0]):
819 shell_name = os.path.basename(directories[0])
820
821
822 if not shell_name:
823 logging.info("Directory hasn't been created for process %s" %proc)
824 return ((0.0, 0.0, 0.0, 0.0, 0), [])
825
826 if verbose: logging.debug("Working on process %s in dir %s" % (proc, shell_name))
827
828 dir_name = pjoin(working_dir, 'SubProcesses', shell_name)
829
830 if os.path.isfile(pjoin(dir_name,'check')):
831 os.remove(pjoin(dir_name,'check'))
832 try:
833 os.remove(pjoin(dir_name,'check_sa.o'))
834 os.remove(pjoin(dir_name,'loop_matrix.o'))
835 except OSError:
836 pass
837
838 devnull = open(os.devnull, 'w')
839 retcode = subprocess.call(['make','check'],
840 cwd=dir_name, stdout=devnull, stderr=devnull)
841 devnull.close()
842
843 if retcode != 0:
844 logging.info("Error while executing make in %s" % shell_name)
845 return ((0.0, 0.0, 0.0, 0.0, 0), [])
846
847
848 if PSpoint:
849 misc.write_PS_input(pjoin(dir_name, 'PS.input'),PSpoint)
850
851
852 if not PS_name is None:
853 misc.write_PS_input(pjoin(dir_name, \
854 'PS.input_%s'%PS_name),PSpoint)
855
856 try:
857 output = subprocess.Popen('./check',
858 cwd=dir_name,
859 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
860 output.read()
861 output.close()
862 if os.path.exists(pjoin(dir_name,'result.dat')):
863 return self.parse_check_output(file(pjoin(dir_name,\
864 'result.dat')),format='tuple')
865 else:
866 logging.warning("Error while looking for file %s"%str(os.path\
867 .join(dir_name,'result.dat')))
868 return ((0.0, 0.0, 0.0, 0.0, 0), [])
869 except IOError:
870 logging.warning("Error while executing ./check in %s" % shell_name)
871 return ((0.0, 0.0, 0.0, 0.0, 0), [])
872
    @classmethod
    def parse_check_output(cls, output, format='tuple'):
        """Parse the check output and return a pair whose first entry gathers
        the finite, born, single and double pole of the ME together with the
        GeV exponent, and whose second entry is the list of 4-momenta for all
        particles involved. Return the answer in two possible formats, 'tuple'
        or 'dict'."""
879
880 res_dict = {'res_p':[],
881 'born':0.0,
882 'finite':0.0,
883 '1eps':0.0,
884 '2eps':0.0,
885 'gev_pow':0,
886 'export_format':'Default',
887 'accuracy':0.0,
888 'return_code':0,
889 'Split_Orders_Names':[],
890 'Loop_SO_Results':[],
891 'Born_SO_Results':[],
892 'Born_kept':[],
893 'Loop_kept':[]
894 }
895 res_p = []
896
897
898
899 if isinstance(output,file) or isinstance(output,list):
900 text=output
901 elif isinstance(output,str):
902 text=output.split('\n')
903 else:
904 raise MadGraph5Error, 'Type for argument output not supported in'+\
905 ' parse_check_output.'
906 for line in text:
907 splitline=line.split()
908 if len(splitline)==0:
909 continue
910 elif splitline[0]=='PS':
911 res_p.append([float(s) for s in splitline[1:]])
912 elif splitline[0]=='BORN':
913 res_dict['born']=float(splitline[1])
914 elif splitline[0]=='FIN':
915 res_dict['finite']=float(splitline[1])
916 elif splitline[0]=='1EPS':
917 res_dict['1eps']=float(splitline[1])
918 elif splitline[0]=='2EPS':
919 res_dict['2eps']=float(splitline[1])
920 elif splitline[0]=='EXP':
921 res_dict['gev_pow']=int(splitline[1])
922 elif splitline[0]=='Export_Format':
923 res_dict['export_format']=splitline[1]
924 elif splitline[0]=='ACC':
925 res_dict['accuracy']=float(splitline[1])
926 elif splitline[0]=='RETCODE':
927 res_dict['return_code']=int(splitline[1])
928 elif splitline[0]=='Split_Orders_Names':
929 res_dict['Split_Orders_Names']=splitline[1:]
930 elif splitline[0] in ['Born_kept', 'Loop_kept']:
931 res_dict[splitline[0]] = [kept=='T' for kept in splitline[1:]]
932 elif splitline[0] in ['Loop_SO_Results', 'Born_SO_Results']:
933
934
935
936
937 res_dict[splitline[0]].append(\
938 ([int(el) for el in splitline[1:]],{}))
939 elif splitline[0]=='SO_Loop':
940 res_dict['Loop_SO_Results'][-1][1][splitline[1]]=\
941 float(splitline[2])
942 elif splitline[0]=='SO_Born':
943 res_dict['Born_SO_Results'][-1][1][splitline[1]]=\
944 float(splitline[2])
945
946 res_dict['res_p'] = res_p
947
948 if format=='tuple':
949 return ((res_dict['finite'],res_dict['born'],res_dict['1eps'],
950 res_dict['2eps'],res_dict['gev_pow']), res_dict['res_p'])
951 else:
952 return res_dict
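
    # Example sketch: the routine also accepts the raw text of result.dat, so
    # a minimal string can be parsed directly:
    #
    #     txt = "BORN 1.0\nFIN 0.5\n1EPS 0.0\n2EPS 0.0\nEXP -4"
    #     (finite, born, eps1, eps2, gev_pow), ps = \
    #         LoopMatrixElementEvaluator.parse_check_output(txt, format='tuple')
    #     # finite == 0.5, born == 1.0, gev_pow == -4 and ps == []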

    def setup_ward_check(self, working_dir, file_names, mp=False):
        """ Modify loop_matrix.f so as to have one external massless gauge boson
        polarization vector turned into its momentum (a Ward identity check).
        It is not a pretty nor flexible solution but it works for this
        particular case."""
958
959 shell_name = None
960 directories = glob.glob(pjoin(working_dir,'P0_*'))
961 if directories and os.path.isdir(directories[0]):
962 shell_name = os.path.basename(directories[0])
963
964 dir_name = pjoin(working_dir, shell_name)
965
966
967 ind=0
968 while ind<len(file_names) and not os.path.isfile(pjoin(dir_name,
969 file_names[ind])):
970 ind += 1
971 if ind==len(file_names):
972 raise Exception, "No helas calls output file found."
973
974 helas_file_name=pjoin(dir_name,file_names[ind])
975 file = open(pjoin(dir_name,helas_file_name), 'r')
976
977 helas_calls_out=""
978 original_file=""
979 gaugeVectorRegExp=re.compile(\
980 r"CALL (MP\_)?VXXXXX\(P\(0,(?P<p_id>\d+)\),((D)?CMPLX\()?ZERO((,KIND\=16)?\))?,"+
981 r"NHEL\(\d+\),[\+\-]1\*IC\(\d+\),W\(1,(?P<wf_id>\d+(,H)?)\)\)")
982 foundGauge=False
983
984 for line in file:
985 helas_calls_out+=line
986 original_file+=line
987 if line.find("INCLUDE 'coupl.inc'") != -1 or \
988 line.find("INCLUDE 'mp_coupl_same_name.inc'") !=-1:
989 helas_calls_out+=" INTEGER WARDINT\n"
990 if not foundGauge:
991 res=gaugeVectorRegExp.search(line)
992 if res!=None:
993 foundGauge=True
994 helas_calls_out+=" DO WARDINT=1,4\n"
995 helas_calls_out+=" W(WARDINT+4,"+res.group('wf_id')+")="
996 if not mp:
997 helas_calls_out+=\
998 "DCMPLX(P(WARDINT-1,"+res.group('p_id')+"),0.0D0)\n"
999 else:
1000 helas_calls_out+="CMPLX(P(WARDINT-1,"+\
1001 res.group('p_id')+"),0.0E0_16,KIND=16)\n"
1002 helas_calls_out+=" ENDDO\n"
1003 file.close()
1004
1005 return pjoin(dir_name,helas_file_name), original_file, helas_calls_out

class LoopMatrixElementTimer(LoopMatrixElementEvaluator):
    # NOTE: the class name used here is an assumption; it extends
    # LoopMatrixElementEvaluator, whose attributes (mg_root, cuttools_dir, ...)
    # are relied upon by setup_process and time_matrix_element below.
    """Class taking care of matrix element evaluation and running timing for
    loop processes."""

    @classmethod
    def make_and_run(cls, dir_name, checkRam=False):
        """ Compile the check program in the directory dir_name.
        Return the compilation time, running time and peak RAM usage. """
1022
1023
1024
1025 if os.path.isfile(pjoin(dir_name,'check')):
1026 os.remove(pjoin(dir_name,'check'))
1027 os.remove(pjoin(dir_name,'check_sa.o'))
1028 os.remove(pjoin(dir_name,'loop_matrix.o'))
1029
1030 devnull = open(os.devnull, 'w')
1031 start=time.time()
1032 retcode = subprocess.call(['make','check'],
1033 cwd=dir_name, stdout=devnull, stderr=devnull)
1034 compilation_time = time.time()-start
1035
1036 if retcode != 0:
1037 logging.info("Error while executing make in %s" % dir_name)
1038 return None, None, None
1039
1040 if not checkRam:
1041 start=time.time()
1042 retcode = subprocess.call('./check',
1043 cwd=dir_name, stdout=devnull, stderr=devnull)
1044 run_time = time.time()-start
1045 ram_usage = None
1046 else:
1047 ptimer = ProcessTimer(['./check'], cwd=dir_name, shell=False, \
1048 stdout=devnull, stderr=devnull, close_fds=True)
1049 try:
1050 ptimer.execute()
1051
1052
1053
1054 while ptimer.poll():
1055 time.sleep(.2)
1056 finally:
1057
1058 ptimer.close()
1059
1060 ram_usage = ptimer.max_rss_memory
1061
1062
1063 run_time = (ptimer.t1 - ptimer.t0)
1064 retcode = ptimer.p.returncode
1065
1066 devnull.close()
1067
1068 if retcode != 0:
1069 logging.warning("Error while executing ./check in %s" % dir_name)
1070 return None, None, None
1071
1072 return compilation_time, run_time, ram_usage
1073
    @classmethod
    def get_MadLoop_Params(cls, MLCardPath):
        """ Return a dictionary of the parameters of the MadLoopParamCard.
        The key is the name of the parameter and the value is the corresponding
        string read from the card."""
1079
1080 res = {}
1081
1082 MLCard_lines = open(MLCardPath).readlines()
1083 try:
1084 for i, line in enumerate(MLCard_lines):
1085 if line.startswith('#'):
1086 res[line.split()[0][1:]]=MLCard_lines[i+1].split()[0]
1087 return res
1088 except IndexError:
1089 raise MadGraph5Error, 'The MadLoop param card %s is '%MLCardPath+\
1090 'not well formatted.'
1091
    @classmethod
    def set_MadLoop_Params(cls, MLCardPath, params):
        # NOTE: the method name is assumed (it mirrors get_MadLoop_Params above);
        # the arguments follow from the body below.
        """ Set the parameters in MadLoopParamCard to the values specified in
        the dictionary params.
        The key is the name of the parameter and the value is the corresponding
        string to write in the card."""
1098
1099
1100 MLCard_lines = open(MLCardPath).readlines()
1101 newCard_lines = []
1102 modified_Params = []
1103 param_to_modify=None
1104 for i, line in enumerate(MLCard_lines):
1105 if not param_to_modify is None:
1106 modified_Params.append(param_to_modify)
1107 newCard_lines.append(params[param_to_modify]+'\n')
1108 param_to_modify = None
1109 else:
1110 if line.startswith('#') and \
1111 line.split()[0][1:] in params.keys():
1112 param_to_modify = line.split()[0][1:]
1113 newCard_lines.append(line)
1114 if not param_to_modify is None:
1115 raise MadGraph5Error, 'The MadLoop param card %s is '%MLCardPath+\
1116 'not well formatted.'
1117
1118 left_over = set(params.keys())-set(modified_Params)
1119 if left_over != set([]):
1120 raise MadGraph5Error, 'The following parameters could not be '+\
1121 'accessed in MadLoopParams.dat : %s'%str(left_over)
1122
1123 newCard=open(MLCardPath,'w')
1124 newCard.writelines(newCard_lines)
1125 newCard.close()
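
    # Example sketch: read the current card values, flip one switch and write
    # the card back ('MLCardPath' is a placeholder; the class name is the one
    # restored above):
    #
    #     params = LoopMatrixElementTimer.get_MadLoop_Params(MLCardPath)
    #     params['DoubleCheckHelicityFilter'] = '.FALSE.'
    #     LoopMatrixElementTimer.set_MadLoop_Params(MLCardPath, params)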
1126
    @classmethod
    def run_initialization(cls, run_dir=None, SubProc_dir=None, infos=None,
                           req_files = ['HelFilter.dat','LoopFilter.dat'],
                           attempts = [3,15]):
        """ Run the initialization of the process in 'run_dir', success being
        characterized by the creation of the files req_files in this directory.
        'run_dir' is the directory containing the driving source code 'check_sa.f'.
        The list attempts gives the successive numbers of PS points the
        initialization should be tried with before calling it failed.
        Returns the number of PS points which were necessary for the init.
        Notice at least run_dir or SubProc_dir must be provided."""
1138
1139
1140
1141 if infos is None:
1142 infos={}
1143
1144 if SubProc_dir is None and run_dir is None:
1145 raise MadGraph5Error, 'At least one of [SubProc_dir,run_dir] must'+\
1146 ' be provided in run_initialization.'
1147
1148
1149
1150 if SubProc_dir is None:
1151 SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir))
1152
1153 if run_dir is None:
1154 directories =[ dir for dir in glob.glob(pjoin(SubProc_dir,\
1155 'P[0-9]*')) if os.path.isdir(dir) ]
1156 if directories:
1157 run_dir = directories[0]
1158 else:
1159 raise MadGraph5Error, 'Could not find a valid running directory'+\
1160 ' in %s.'%str(SubProc_dir)
1161
1162 to_attempt = copy.copy(attempts)
1163 to_attempt.reverse()
1164 my_req_files = copy.copy(req_files)
1165
1166
1167 MLCardPath = pjoin(SubProc_dir,os.pardir,'Cards',\
1168 'MadLoopParams.dat')
1169 if not os.path.isfile(MLCardPath):
1170 raise MadGraph5Error, 'Could not find MadLoopParams.dat at %s.'\
1171 %MLCardPath
1172 if 'FALSE' in cls.get_MadLoop_Params(MLCardPath)['UseLoopFilter'].upper():
1173 try:
1174 my_req_files.pop(my_req_files.index('LoopFilter.dat'))
1175 except ValueError:
1176 pass
1177
1178 def need_init():
1179 """ True if init not done yet."""
1180 proc_prefix_file = open(pjoin(run_dir,'proc_prefix.txt'),'r')
1181 proc_prefix = proc_prefix_file.read()
1182 proc_prefix_file.close()
1183 return any([not os.path.exists(pjoin(run_dir,'MadLoop5_resources',
1184 proc_prefix+fname)) for fname in my_req_files]) or \
1185 not os.path.isfile(pjoin(run_dir,'check')) or \
1186 not os.access(pjoin(run_dir,'check'), os.X_OK)
1187
1188 curr_attempt = 1
1189 while to_attempt!=[] and need_init():
1190 curr_attempt = to_attempt.pop()+1
1191
1192
1193 cls.fix_PSPoint_in_check(run_dir, read_ps = False,
1194 npoints = curr_attempt)
1195 compile_time, run_time, ram_usage = cls.make_and_run(run_dir)
1196 if compile_time==None:
1197 logging.error("Failed at running the process in %s."%run_dir)
1198 attempts = None
1199 return None
1200
1201 if 'Process_compilation' not in infos.keys() or \
1202 infos['Process_compilation']==None:
1203 infos['Process_compilation'] = compile_time
1204 infos['Initialization'] = run_time
1205
1206 if need_init():
1207 return None
1208 else:
1209 return curr_attempt-1
1210
    def skip_loop_evaluation_setup(self, dir_name, skip=True):
        """ Edit loop_matrix.f in order to skip the loop evaluation phase.
        Notice this only affects the double precision evaluation, which is
        normally fine as we do not make the timing check on mp."""
1215
1216 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1217 loop_matrix = file.read()
1218 file.close()
1219
1220 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1221 loop_matrix = re.sub(r"SKIPLOOPEVAL=\S+\)","SKIPLOOPEVAL=%s)"%('.TRUE.'
1222 if skip else '.FALSE.'), loop_matrix)
1223 file.write(loop_matrix)
1224 file.close()
1225
    def boot_time_setup(self, dir_name, bootandstop=True):
        """ Edit loop_matrix.f in order to set the flag which stops the
        execution after booting the program (i.e. reading the color data)."""
1229
1230 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1231 loop_matrix = file.read()
1232 file.close()
1233
1234 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1235 loop_matrix = re.sub(r"BOOTANDSTOP=\S+\)","BOOTANDSTOP=%s)"%('.TRUE.'
1236 if bootandstop else '.FALSE.'), loop_matrix)
1237 file.write(loop_matrix)
1238 file.close()
1239
    def setup_process(self, matrix_element, export_dir, reusing = False,
                      param_card = None, MLOptions={}, clean=True):
1242 """ Output the matrix_element in argument and perform the initialization
1243 while providing some details about the output in the dictionary returned.
1244 Returns None if anything fails"""
1245
1246 infos={'Process_output': None,
1247 'HELAS_MODEL_compilation' : None,
1248 'dir_path' : None,
1249 'Initialization' : None,
1250 'Process_compilation' : None}
1251
1252 if not reusing and clean:
1253 if os.path.isdir(export_dir):
1254 clean_up(self.output_path)
1255 if os.path.isdir(export_dir):
1256 raise InvalidCmd(\
                    "The directory %s already exists. Please remove it."\
1258 %str(export_dir))
1259 else:
1260 if not os.path.isdir(export_dir):
1261 raise InvalidCmd(\
1262 "Could not find the directory %s to reuse."%str(export_dir))
1263
1264
1265 if not reusing and clean:
1266 model = matrix_element['processes'][0].get('model')
1267
1268
1269 import madgraph.loop.loop_exporters as loop_exporters
1270 if self.loop_optimized_output:
1271 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
1272 else:
1273 exporter_class=loop_exporters.LoopProcessExporterFortranSA
1274
1275 MLoptions = {'clean': True,
1276 'complex_mass': self.cmass_scheme,
1277 'export_format':'madloop',
1278 'mp':True,
1279 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
1280 'cuttools_dir': self.cuttools_dir,
1281 'fortran_compiler':self.cmd.options['fortran_compiler'],
1282 'output_dependencies':self.cmd.options['output_dependencies']}
1283
1284 MLoptions.update(self.tir_dir)
1285
1286 start=time.time()
1287 FortranExporter = exporter_class(self.mg_root, export_dir, MLoptions)
1288 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
1289 FortranExporter.copy_v4template(modelname=model.get('name'))
1290 FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
1291 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
1292 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
1293 for c in l]))
1294 FortranExporter.convert_model_to_mg4(self.full_model,wanted_lorentz,wanted_couplings)
1295 infos['Process_output'] = time.time()-start
1296 start=time.time()
1297 FortranExporter.finalize_v4_directory(None,"",False,False,'gfortran')
1298 infos['HELAS_MODEL_compilation'] = time.time()-start
1299
1300
1301 if param_card != None:
1302 if isinstance(param_card, str):
1303 cp(pjoin(param_card),\
1304 pjoin(export_dir,'Cards','param_card.dat'))
1305 else:
1306 param_card.write(pjoin(export_dir,'Cards','param_card.dat'))
1307
1308
1309
1310 self.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1311 read_ps = False, npoints = 4)
1312 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
1313 mp = False, loop_filter = True,MLOptions=MLOptions)
1314
1315 shell_name = None
1316 directories = glob.glob(pjoin(export_dir, 'SubProcesses','P0_*'))
1317 if directories and os.path.isdir(directories[0]):
1318 shell_name = os.path.basename(directories[0])
1319 dir_name = pjoin(export_dir, 'SubProcesses', shell_name)
1320 infos['dir_path']=dir_name
1321
1322 attempts = [3,15]
1323
1324 try:
1325 os.remove(pjoin(dir_name,'check'))
1326 os.remove(pjoin(dir_name,'check_sa.o'))
1327 except OSError:
1328 pass
1329
1330 nPS_necessary = self.run_initialization(dir_name,
1331 pjoin(export_dir,'SubProcesses'),infos,\
1332 req_files = ['HelFilter.dat','LoopFilter.dat'],
1333 attempts = attempts)
1334 if attempts is None:
1335 logger.error("Could not compile the process %s,"%shell_name+\
1336 " try to generate it via the 'generate' command.")
1337 return None
1338 if nPS_necessary is None:
1339 logger.error("Could not initialize the process %s"%shell_name+\
1340 " with %s PS points."%max(attempts))
1341 return None
1342 elif nPS_necessary > min(attempts):
1343 logger.warning("Could not initialize the process %s"%shell_name+\
1344 " with %d PS points. It needed %d."%(min(attempts),nPS_necessary))
1345
1346 return infos
1347
    def time_matrix_element(self, matrix_element, reusing = False,
                            param_card = None, keep_folder = False, options=None,
                            MLOptions = {}):
1351 """ Output the matrix_element in argument and give detail information
1352 about the timing for its output and running"""
1353
1354
1355
1356
1357
1358 if options and 'split_orders' in options.keys():
1359 split_orders = options['split_orders']
1360 else:
1361 split_orders = -1
1362
1363 assert ((not reusing and isinstance(matrix_element, \
1364 helas_objects.HelasMatrixElement)) or (reusing and
1365 isinstance(matrix_element, base_objects.Process)))
1366 if not reusing:
1367 proc_name = matrix_element['processes'][0].shell_string()[2:]
1368 else:
1369 proc_name = matrix_element.shell_string()[2:]
1370
1371 export_dir=pjoin(self.output_path,('SAVED' if keep_folder else '')+\
1372 temp_dir_prefix+"_%s"%proc_name)
1373
1374 res_timings = self.setup_process(matrix_element,export_dir, \
1375 reusing, param_card,MLOptions = MLOptions)
1376
1377 if res_timings == None:
1378 return None
1379 dir_name=res_timings['dir_path']
1380
1381 def check_disk_usage(path):
1382 return subprocess.Popen("du -shc -L "+str(path), \
1383 stdout=subprocess.PIPE, shell=True).communicate()[0].split()[-2]
1384
1385
1386
1387
1388 res_timings['du_source']=check_disk_usage(pjoin(\
1389 export_dir,'Source','*','*.f'))
1390 res_timings['du_process']=check_disk_usage(pjoin(dir_name,'*.f'))
1391 res_timings['du_color']=check_disk_usage(pjoin(dir_name,'*.dat'))
1392 res_timings['du_exe']=check_disk_usage(pjoin(dir_name,'check'))
1393
1394 if not res_timings['Initialization']==None:
1395 time_per_ps_estimate = (res_timings['Initialization']/4.0)/2.0
1396 else:
1397
1398
1399 self.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1400 read_ps = False, npoints = 3, hel_config = -1,
1401 split_orders=split_orders)
1402 compile_time, run_time, ram_usage = self.make_and_run(dir_name)
1403 time_per_ps_estimate = run_time/3.0
1404
1405 self.boot_time_setup(dir_name,bootandstop=True)
1406 compile_time, run_time, ram_usage = self.make_and_run(dir_name)
1407 res_timings['Booting_time'] = run_time
1408 self.boot_time_setup(dir_name,bootandstop=False)
1409
1410
1411 contributing_hel=0
1412 n_contrib_hel=0
1413 proc_prefix_file = open(pjoin(dir_name,'proc_prefix.txt'),'r')
1414 proc_prefix = proc_prefix_file.read()
1415 proc_prefix_file.close()
1416 helicities = file(pjoin(dir_name,'MadLoop5_resources',
1417 '%sHelFilter.dat'%proc_prefix)).read().split()
1418 for i, hel in enumerate(helicities):
1419 if (self.loop_optimized_output and int(hel)>-10000) or hel=='T':
1420 if contributing_hel==0:
1421 contributing_hel=i+1
1422 n_contrib_hel += 1
1423
1424 if contributing_hel==0:
1425 logger.error("Could not find a contributing helicity "+\
1426 "configuration for process %s."%proc_name)
1427 return None
1428
1429 res_timings['n_contrib_hel']=n_contrib_hel
1430 res_timings['n_tot_hel']=len(helicities)
1431
1432
1433 target_pspoints_number = max(int(30.0/time_per_ps_estimate)+1,5)
1434
1435 logger.info("Checking timing for process %s "%proc_name+\
1436 "with %d PS points."%target_pspoints_number)
1437
1438 self.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1439 read_ps = False, npoints = target_pspoints_number*2, \
1440 hel_config = contributing_hel, split_orders=split_orders)
1441 compile_time, run_time, ram_usage = self.make_and_run(dir_name)
1442 if compile_time == None: return None
1443 res_timings['run_polarized_total']=\
1444 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1445
1446 self.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1447 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1448 split_orders=split_orders)
1449 compile_time, run_time, ram_usage = self.make_and_run(dir_name,
1450 checkRam=True)
1451 if compile_time == None: return None
1452 res_timings['run_unpolarized_total']=\
1453 (run_time-res_timings['Booting_time'])/target_pspoints_number
1454 res_timings['ram_usage'] = ram_usage
1455
1456 if not self.loop_optimized_output:
1457 return res_timings
1458
1459
1460
1461
1462
1463 self.skip_loop_evaluation_setup(dir_name,skip=True)
1464
1465 self.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1466 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1467 split_orders=split_orders)
1468 compile_time, run_time, ram_usage = self.make_and_run(dir_name)
1469 if compile_time == None: return None
1470 res_timings['run_unpolarized_coefs']=\
1471 (run_time-res_timings['Booting_time'])/target_pspoints_number
1472
1473 self.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1474 read_ps = False, npoints = target_pspoints_number*2, \
1475 hel_config = contributing_hel, split_orders=split_orders)
1476 compile_time, run_time, ram_usage = self.make_and_run(dir_name)
1477 if compile_time == None: return None
1478 res_timings['run_polarized_coefs']=\
1479 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1480
1481
1482 self.skip_loop_evaluation_setup(dir_name,skip=False)
1483
1484 return res_timings
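
    # Example sketch of the timing dictionary returned above ('timer' is a
    # placeholder instance of this class; keys are the ones filled in by
    # setup_process and the loops above):
    #
    #     res = timer.time_matrix_element(matrix_element)
    #     # res['Process_output'], res['Process_compilation'], res['Initialization'],
    #     # res['Booting_time'], res['run_unpolarized_total'], res['run_polarized_total'],
    #     # res['n_contrib_hel'], res['n_tot_hel'], res['ram_usage'], ...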
1485
1486
1487
1488
1489
    def check_matrix_element_stability(self, matrix_element, options=None,
                                       infos_IN = None, param_card = None,
                                       keep_folder = False, MLOptions = {}):
1493 """ Output the matrix_element in argument, run in for nPoints and return
1494 a dictionary containing the stability information on each of these points.
1495 If infos are provided, then the matrix element output is skipped and
1496 reused from a previous run and the content of infos.
1497 """
1498
1499 if not options:
1500 reusing = False
1501 nPoints = 100
1502 split_orders = -1
1503 else:
1504 reusing = options['reuse']
1505 nPoints = options['npoints']
1506 split_orders = options['split_orders']
1507
1508 assert ((not reusing and isinstance(matrix_element, \
1509 helas_objects.HelasMatrixElement)) or (reusing and
1510 isinstance(matrix_element, base_objects.Process)))
1511
1512
1513 def format_PS_point(ps, rotation=0):
1514 """ Write out the specified PS point to the file dir_path/PS.input
            while rotating it if rotation!=0. We consider only rotations of 90
            degrees, but one could think of having rotations of arbitrary angle too.
1517 The first two possibilities, 1 and 2 are a rotation and boost
1518 along the z-axis so that improve_ps can still work.
1519 rotation=0 => No rotation
1520 rotation=1 => Z-axis pi/2 rotation
1521 rotation=2 => Z-axis pi/4 rotation
1522 rotation=3 => Z-axis boost
1523 rotation=4 => (x'=z,y'=-x,z'=-y)
1524 rotation=5 => (x'=-z,y'=y,z'=x)"""
1525 if rotation==0:
1526 p_out=copy.copy(ps)
1527 elif rotation==1:
1528 p_out = [[pm[0],-pm[2],pm[1],pm[3]] for pm in ps]
1529 elif rotation==2:
1530 sq2 = math.sqrt(2.0)
1531 p_out = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in ps]
1532 elif rotation==3:
1533 p_out = boost_momenta(ps, 3)
1534
1535
1536 elif rotation==4:
1537 p_out=[[pm[0],pm[3],-pm[1],-pm[2]] for pm in ps]
1538 elif rotation==5:
1539 p_out=[[pm[0],-pm[3],pm[2],pm[1]] for pm in ps]
1540 else:
1541 raise MadGraph5Error("Rotation id %i not implemented"%rotation)
1542
1543 return '\n'.join([' '.join(['%.16E'%pi for pi in p]) for p in p_out])
1544
1545 def pick_PS_point(proc, options):
1546 """ Randomly generate a PS point and make sure it is eligible. Then
1547 return it. Users can edit the cuts here if they want."""
1548 def Pt(pmom):
1549 """ Computes the pt of a 4-momentum"""
1550 return math.sqrt(pmom[1]**2+pmom[2]**2)
1551 def DeltaR(p1,p2):
1552 """ Computes the DeltaR between two 4-momenta"""
1553
1554 p1_vec=math.sqrt(p1[1]**2+p1[2]**2+p1[3]**2)
1555 p2_vec=math.sqrt(p2[1]**2+p2[2]**2+p2[3]**2)
1556 eta1=0.5*math.log((p1_vec+p1[3])/(p1_vec-p1[3]))
1557 eta2=0.5*math.log((p2_vec+p2[3])/(p2_vec-p2[3]))
1558
1559 phi1=math.atan2(p1[2],p1[1])
1560 phi2=math.atan2(p2[2],p2[1])
1561 dphi=abs(phi2-phi1)
1562
1563 dphi=abs(abs(dphi-math.pi)-math.pi)
1564
1565 return math.sqrt(dphi**2+(eta2-eta1)**2)
1566
1567 def pass_cuts(p):
1568 """ Defines the cut a PS point must pass"""
1569 for i, pmom in enumerate(p[2:]):
1570
1571 if Pt(pmom)<50.0:
1572 return False
1573
1574 for pmom2 in p[3+i:]:
1575 if DeltaR(pmom,pmom2)<0.5:
1576 return False
1577 return True
1578 p, w_rambo = self.get_momenta(proc, options)
1579 if options['events']:
1580 return p
1581
1582 while (not pass_cuts(p) and len(p)>3):
1583 p, w_rambo = self.get_momenta(proc, options)
1584
1585
1586
1587
1588 if len(p)==3:
1589 p = boost_momenta(p,3,random.uniform(0.0,0.99))
1590 return p
1591
1592
1593
1594
1595 accuracy_threshold=1.0e-1
1596
1597
1598
1599 num_rotations = 1
1600
1601 if "MLReductionLib" not in MLOptions:
1602 tools=[1]
1603 else:
1604 tools=MLOptions["MLReductionLib"]
1605 tools=list(set(tools))
1606
1607 tool_var={'pjfry':2,'golem':4}
1608 for tool in ['pjfry','golem']:
1609 tool_dir='%s_dir'%tool
1610 if not tool_dir in self.tir_dir:
1611 continue
1612 tool_libpath=self.tir_dir[tool_dir]
1613 tool_libname="lib%s.a"%tool
1614 if (not isinstance(tool_libpath,str)) or (not os.path.exists(tool_libpath)) \
1615 or (not os.path.isfile(pjoin(tool_libpath,tool_libname))):
1616 if tool_var[tool] in tools:
1617 tools.remove(tool_var[tool])
1618 if not tools:
1619 return None
1620
1621 if not reusing:
1622 process = matrix_element['processes'][0]
1623 else:
1624 process = matrix_element
1625 proc_name = process.shell_string()[2:]
1626 export_dir=pjoin(self.mg_root,("SAVED" if keep_folder else "")+\
1627 temp_dir_prefix+"_%s"%proc_name)
1628
1629 tools_name={1:'CutTools',2:'PJFry++',3:'IREGI',4:'Golem95'}
1630 return_dict={}
1631 return_dict['Stability']={}
1632 infos_save={'Process_output': None,
1633 'HELAS_MODEL_compilation' : None,
1634 'dir_path' : None,
1635 'Initialization' : None,
1636 'Process_compilation' : None}
1637
1638 for tool in tools:
1639 tool_name=tools_name[tool]
1640
1641
1642
1643
1644
1645 DP_stability = []
1646 QP_stability = []
1647
1648 Unstable_PS_points = []
1649
1650 Exceptional_PS_points = []
1651
1652 MLoptions={}
1653 MLoptions["MLReductionLib"]=tool
1654 clean=(tool==tools[0])
1655 if infos_IN==None or (tool_name not in infos_IN):
1656 infos=infos_IN
1657 else:
1658 infos=infos_IN[tool_name]
1659
1660 if not infos:
1661 infos = self.setup_process(matrix_element,export_dir, \
1662 reusing, param_card,MLoptions,clean)
1663 if not infos:
1664 return None
1665
1666 if clean:
1667 infos_save['Process_output']=infos['Process_output']
1668 infos_save['HELAS_MODEL_compilation']=infos['HELAS_MODEL_compilation']
1669 infos_save['dir_path']=infos['dir_path']
1670 infos_save['Process_compilation']=infos['Process_compilation']
1671 else:
1672 if not infos['Process_output']:
1673 infos['Process_output']=infos_save['Process_output']
1674 if not infos['HELAS_MODEL_compilation']:
1675 infos['HELAS_MODEL_compilation']=infos_save['HELAS_MODEL_compilation']
1676 if not infos['dir_path']:
1677 infos['dir_path']=infos_save['dir_path']
1678 if not infos['Process_compilation']:
1679 infos['Process_compilation']=infos_save['Process_compilation']
1680
1681 dir_path=infos['dir_path']
1682
1683
1684 savefile='SavedStabilityRun_%s%%s.pkl'%tools_name[tool]
1685 data_i = 0
1686
1687 if reusing:
1688
1689 data_i=0
1690 while os.path.isfile(pjoin(dir_path,savefile%('_%d'%data_i))):
1691 pickle_path = pjoin(dir_path,savefile%('_%d'%data_i))
1692 saved_run = save_load_object.load_from_file(pickle_path)
1693 if data_i>0:
1694 logger.info("Loading additional data stored in %s."%
1695 str(pickle_path))
1696 logger.info("Loaded data moved to %s."%str(pjoin(
1697 dir_path,'LOADED_'+savefile%('_%d'%data_i))))
1698 shutil.move(pickle_path,
                        pjoin(dir_path,'LOADED_'+savefile%('_%d'%data_i)))
1700 DP_stability.extend(saved_run['DP_stability'])
1701 QP_stability.extend(saved_run['QP_stability'])
1702 Unstable_PS_points.extend(saved_run['Unstable_PS_points'])
1703 Exceptional_PS_points.extend(saved_run['Exceptional_PS_points'])
1704 data_i += 1
1705
1706 return_dict['Stability'][tool_name] = {'DP_stability':DP_stability,
1707 'QP_stability':QP_stability,
1708 'Unstable_PS_points':Unstable_PS_points,
1709 'Exceptional_PS_points':Exceptional_PS_points}
1710
1711 if nPoints==0:
1712 if len(return_dict['Stability'][tool_name]['DP_stability'])!=0:
1713
1714 if data_i>1:
1715 save_load_object.save_to_file(pjoin(dir_path,
1716 savefile%'_0'),return_dict['Stability'][tool_name])
1717 continue
1718 else:
1719 logger.info("ERROR: Not reusing a directory and the number"+\
                        " of points for the check is zero.")
1721 return None
1722
1723 logger.info("Checking stability of process %s "%proc_name+\
1724 "with %d PS points by %s."%(nPoints,tool_name))
1725 if infos['Initialization'] != None:
1726 time_per_ps_estimate = (infos['Initialization']/4.0)/2.0
1727 sec_needed = int(time_per_ps_estimate*nPoints*4)
1728 else:
1729 sec_needed = 0
1730
1731 progress_bar = None
1732 time_info = False
1733 if sec_needed>5:
1734 time_info = True
1735 logger.info("This check should take about "+\
1736 "%s to run. Started on %s."%(\
1737 str(datetime.timedelta(seconds=sec_needed)),\
1738 datetime.datetime.now().strftime("%d-%m-%Y %H:%M")))
1739 if logger.getEffectiveLevel()<logging.WARNING and \
1740 (sec_needed>5 or (reusing and infos['Initialization'] == None)):
1741 widgets = ['Stability check:', pbar.Percentage(), ' ',
1742 pbar.Bar(),' ', pbar.ETA(), ' ']
1743 progress_bar = pbar.ProgressBar(widgets=widgets, maxval=nPoints,
1744 fd=sys.stdout)
1745 self.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1746 read_ps = True, npoints = 1, hel_config = -1, split_orders=split_orders)
1747
1748
1749
1750 try:
1751 os.remove(pjoin(dir_path,'check'))
1752 os.remove(pjoin(dir_path,'check_sa.o'))
1753 except OSError:
1754 pass
1755
1756 devnull = open(os.devnull, 'w')
1757 retcode = subprocess.call(['make','check'],
1758 cwd=dir_path, stdout=devnull, stderr=devnull)
1759 devnull.close()
1760 if retcode != 0:
1761 logging.info("Error while executing make in %s" % dir_path)
1762 return None
1763
1764
1765
1766
1767 if not os.path.isfile(pjoin(dir_path,'StabilityCheckDriver.f')):
1768
1769
1770 if os.path.isfile(pjoin(dir_path,'born_matrix.f')):
1771 checkerName = 'StabilityCheckDriver.f'
1772 else:
1773 checkerName = 'StabilityCheckDriver_loop_induced.f'
1774
1775 with open(pjoin(self.mg_root,'Template','loop_material','Checks',
1776 checkerName),'r') as checkerFile:
1777 with open(pjoin(dir_path,'proc_prefix.txt')) as proc_prefix:
1778 checkerToWrite = checkerFile.read()%{'proc_prefix':
1779 proc_prefix.read()}
1780 checkerFile = open(pjoin(dir_path,'StabilityCheckDriver.f'),'w')
1781 checkerFile.write(checkerToWrite)
1782 checkerFile.close()
1783
1784
1785
1786
1787
1788 if os.path.isfile(pjoin(dir_path,'StabilityCheckDriver')):
1789 os.remove(pjoin(dir_path,'StabilityCheckDriver'))
1790 if os.path.isfile(pjoin(dir_path,'loop_matrix.o')):
1791 os.remove(pjoin(dir_path,'loop_matrix.o'))
1792 misc.compile(arg=['StabilityCheckDriver'], cwd=dir_path, \
1793 mode='fortran', job_specs = False)
1794
1795
1796
1797
1798 if len(process['legs'])==3:
1799 self.fix_MadLoopParamCard(dir_path, mp=False,
1800 loop_filter=False, DoubleCheckHelicityFilter=True)
1801
1802 StabChecker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
1803 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1804 cwd=dir_path)
1805 start_index = len(DP_stability)
1806 if progress_bar!=None:
1807 progress_bar.start()
1808
1809
1810 interrupted = False
1811
1812
1813 retry = 0
1814
1815 i=start_index
1816 if options and 'events' in options and options['events']:
1817
1818 import MadSpin.decay as madspin
1819 fsock = open(options['events'])
1820 self.event_file = madspin.Event(fsock)
1821 while i<(start_index+nPoints):
1822
1823 qp_dict={}
1824 dp_dict={}
1825 UPS = None
1826 EPS = None
1827
1828 if retry==0:
1829 p = pick_PS_point(process, options)
1830
1831 try:
1832 if progress_bar!=None:
1833 progress_bar.update(i+1-start_index)
1834
1835 PSPoint = format_PS_point(p,0)
1836 dp_res=[]
1837 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1838 split_orders=split_orders))
1839 dp_dict['CTModeA']=dp_res[-1]
1840 dp_res.append(self.get_me_value(StabChecker,PSPoint,2,
1841 split_orders=split_orders))
1842 dp_dict['CTModeB']=dp_res[-1]
1843 for rotation in range(1,num_rotations+1):
1844 PSPoint = format_PS_point(p,rotation)
1845 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1846 split_orders=split_orders))
1847 dp_dict['Rotation%i'%rotation]=dp_res[-1]
1848
1849 if any([not res for res in dp_res]):
1850 return None
1851 dp_accuracy =((max(dp_res)-min(dp_res))/
1852 abs(sum(dp_res)/len(dp_res)))
1853 dp_dict['Accuracy'] = dp_accuracy
1854 if dp_accuracy>accuracy_threshold:
1855 if tool==1:
1856
1857 UPS = [i,p]
1858 qp_res=[]
1859 PSPoint = format_PS_point(p,0)
1860 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1861 split_orders=split_orders))
1862 qp_dict['CTModeA']=qp_res[-1]
1863 qp_res.append(self.get_me_value(StabChecker,PSPoint,5,
1864 split_orders=split_orders))
1865 qp_dict['CTModeB']=qp_res[-1]
1866 for rotation in range(1,num_rotations+1):
1867 PSPoint = format_PS_point(p,rotation)
1868 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1869 split_orders=split_orders))
1870 qp_dict['Rotation%i'%rotation]=qp_res[-1]
1871
1872 if any([not res for res in qp_res]):
1873 return None
1874
1875 qp_accuracy = ((max(qp_res)-min(qp_res))/
1876 abs(sum(qp_res)/len(qp_res)))
1877 qp_dict['Accuracy']=qp_accuracy
1878 if qp_accuracy>accuracy_threshold:
1879 EPS = [i,p]
1880 else:
1881
1882
1883 UPS = [i,p]
1884
1885 except KeyboardInterrupt:
1886 interrupted = True
1887 break
1888             except IOError as e:
1889 if e.errno == errno.EINTR:
1890 if retry==100:
1891                         logger.error("Failed one hundred times in a row because"+
1892                                      " of system call interruptions.")
1893 raise
1894 else:
1895                         logger.debug("Recovered from a system call interruption. "+\
1896                                      "PSpoint #%i, attempt #%i."%(i,retry+1))
1897
1898 time.sleep(0.5)
1899
1900 retry = retry+1
1901
1902 try:
1903 StabChecker.kill()
1904 except Exception:
1905 pass
1906 StabChecker = subprocess.Popen(\
1907 [pjoin(dir_path,'StabilityCheckDriver')],
1908 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
1909 stderr=subprocess.PIPE, cwd=dir_path)
1910 continue
1911 else:
1912 raise
1913
1914
1915
1916 retry = 0
1917
1918 i=i+1
1919
1920
1921 DP_stability.append(dp_dict)
1922 QP_stability.append(qp_dict)
1923 if not EPS is None:
1924 Exceptional_PS_points.append(EPS)
1925 if not UPS is None:
1926 Unstable_PS_points.append(UPS)
1927
1928 if progress_bar!=None:
1929 progress_bar.finish()
1930 if time_info:
1931 logger.info('Finished check on %s.'%datetime.datetime.now().strftime(\
1932 "%d-%m-%Y %H:%M"))
1933
1934
1935 if not interrupted:
1936 StabChecker.stdin.write('y\n')
1937 else:
1938 StabChecker.kill()
1939
1940
1941
1942
1943
1944
1945
1946 save_load_object.save_to_file(pjoin(dir_path,savefile%'_0'),\
1947 return_dict['Stability'][tool_name])
1948
1949 if interrupted:
1950 break
1951
1952 return_dict['Process'] = matrix_element.get('processes')[0] if not \
1953 reusing else matrix_element
1954 return return_dict
1955
1956 @classmethod
1957     def get_me_value(cls, StabChecker, PSpoint, mode, hel=-1, mu_r=-1.0,
1958 split_orders=-1):
1959 """ This version of get_me_value is simplified for the purpose of this
1960 class. No compilation is necessary. The CT mode can be specified."""
1961
1962
1963 StabChecker.stdin.write('\x1a')
1964 StabChecker.stdin.write('1\n')
1965 StabChecker.stdin.write('%d\n'%mode)
1966 StabChecker.stdin.write('%s\n'%PSpoint)
1967 StabChecker.stdin.write('%.16E\n'%mu_r)
1968 StabChecker.stdin.write('%d\n'%hel)
1969 StabChecker.stdin.write('%d\n'%split_orders)
1970 try:
1971 while True:
1972 output = StabChecker.stdout.readline()
1973 if output==' ##TAG#RESULT_START#TAG##\n':
1974 break
1975 res = ""
1976 while True:
1977 output = StabChecker.stdout.readline()
1978 if output==' ##TAG#RESULT_STOP#TAG##\n':
1979 break
1980 else:
1981 res += output
1982 return cls.parse_check_output(res,format='tuple')[0][0]
1983 except IOError as e:
1984 logging.warning("Error while running MadLoop. Exception = %s"%str(e))
1985 raise e
1986
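# --- Illustration (not part of the original module) --------------------------
# get_me_value above drives the Fortran StabilityCheckDriver through its
# standard input/output and keeps only the text printed between the
# '##TAG#RESULT_START#TAG##' and '##TAG#RESULT_STOP#TAG##' sentinels.  Below is
# a minimal, stand-alone sketch of that tag-delimited extraction; the sentinel
# strings are taken from the code above, everything else is hypothetical.
def _extract_tagged_block(lines,
                          start=' ##TAG#RESULT_START#TAG##\n',
                          stop=' ##TAG#RESULT_STOP#TAG##\n'):
    """Return the text found between the start and stop sentinel lines."""
    collecting = False
    res = ""
    for line in lines:
        if line == start:
            collecting = True
        elif line == stop:
            break
        elif collecting:
            res += line
    return res
# Usage sketch: _extract_tagged_block(driver_stdout_lines) returns the raw
# block that parse_check_output then converts into numerical results.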
1989     """ Perform a Python evaluation of the matrix element independently for
1990     all possible helicity configurations for a fixed number of points N and
1991     return the average for each in the format [[hel_config, eval],...].
1992     This is used to determine which helicity configurations vanish or are
1993     dependent at generation time, so as to set up the output accordingly.
1994     This is not yet implemented at LO."""
1995
1996
1997 assert isinstance(process,base_objects.Process)
1998 assert process.get('perturbation_couplings')==[]
1999
2000 N_eval=50
2001
2002 evaluator = MatrixElementEvaluator(process.get('model'), param_card,
2003 auth_skipping = False, reuse = True)
2004
2005 amplitude = diagram_generation.Amplitude(process)
2006 matrix_element = helas_objects.HelasMatrixElement(amplitude,gen_color=False)
2007
2008 cumulative_helEvals = []
2009
2010 for i in range(N_eval):
2011 p, w_rambo = evaluator.get_momenta(process)
2012 helEvals = evaluator.evaluate_matrix_element(\
2013 matrix_element, p = p, output = 'helEvals')['helEvals']
2014 if cumulative_helEvals==[]:
2015 cumulative_helEvals=copy.copy(helEvals)
2016 else:
2017 cumulative_helEvals = [[h[0],h[1]+helEvals[i][1]] for i, h in \
2018 enumerate(cumulative_helEvals)]
2019
2020
2021 cumulative_helEvals = [[h[0],h[1]/N_eval] for h in cumulative_helEvals]
2022
2023
2024
2025 clean_added_globals(ADDED_GLOBAL)
2026
2027 return cumulative_helEvals
2028
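# --- Illustration (not part of the original module) --------------------------
# evaluate_helicities above averages the per-helicity matrix-element values
# over N_eval phase-space points.  The accumulation it performs is equivalent
# to this stand-alone sketch (the data in the usage comment is made up).
def _average_hel_evals(per_point_evals):
    """per_point_evals: list of [[hel_config, value], ...], one entry per point."""
    n_points = len(per_point_evals)
    summed = [[hel, 0.0] for hel, _ in per_point_evals[0]]
    for hel_evals in per_point_evals:
        for i, (_, value) in enumerate(hel_evals):
            summed[i][1] += value
    return [[hel, total / n_points] for hel, total in summed]
# e.g. _average_hel_evals([[['-1 -1', 2.0]], [['-1 -1', 4.0]]])
#      == [['-1 -1', 3.0]]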
2031     """A wrapper function for running an iteration of a function over
2032     a multiprocess, without having to first create a process list
2033     (which makes a big difference for very large multiprocesses).
2034     stored_quantities is a dictionary for any quantities that we want
2035     to reuse between runs."""
2036
2037 model = multiprocess.get('model')
2038 isids = [leg.get('ids') for leg in multiprocess.get('legs') \
2039 if not leg.get('state')]
2040 fsids = [leg.get('ids') for leg in multiprocess.get('legs') \
2041 if leg.get('state')]
2042
2043 id_anti_id_dict = {}
2044 for id in set(tuple(sum(isids+fsids, []))):
2045 id_anti_id_dict[id] = model.get_particle(id).get_anti_pdg_code()
2046 id_anti_id_dict[model.get_particle(id).get_anti_pdg_code()] = id
2047 sorted_ids = []
2048 results = []
2049     for is_prod in itertools.product(*isids):
2050         for fs_prod in itertools.product(*fsids):
2051
2052
2053 if check_already_checked(is_prod, fs_prod, sorted_ids,
2054 multiprocess, model, id_anti_id_dict):
2055 continue
2056
2057 process = multiprocess.get_process_with_legs(base_objects.LegList(\
2058 [base_objects.Leg({'id': id, 'state':False}) for \
2059 id in is_prod] + \
2060 [base_objects.Leg({'id': id, 'state':True}) for \
2061 id in fs_prod]))
2062
2063 if opt is not None:
2064 if isinstance(opt, dict):
2065 try:
2066 value = opt[process.base_string()]
2067 except Exception:
2068 continue
2069 result = function(process, stored_quantities, value, options=options)
2070 else:
2071 result = function(process, stored_quantities, opt, options=options)
2072 else:
2073 result = function(process, stored_quantities, options=options)
2074
2075 if result:
2076 results.append(result)
2077
2078 return results
2079
2080
2081
2082
2083
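# --- Illustration (not part of the original module) --------------------------
# run_multiprocs_no_crossings above loops over the cartesian product of the
# per-leg id lists instead of first expanding the multiprocess into an
# explicit process list.  A stripped-down sketch of that enumeration, reusing
# the itertools import from the top of this module (the ids in the usage
# comment are hypothetical PDG codes):
def _iterate_id_combinations(isids, fsids):
    """Yield (initial_ids, final_ids) pairs, one per candidate process."""
    for is_prod in itertools.product(*isids):
        for fs_prod in itertools.product(*fsids):
            yield is_prod, fs_prod
# e.g. list(_iterate_id_combinations([[1, 2]], [[21], [23, 25]])) gives the
# four combinations ((1,), (21, 23)), ((1,), (21, 25)), ((2,), (21, 23)) and
# ((2,), (21, 25)).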
2084 def check_already_checked(is_ids, fs_ids, sorted_ids, process, model,
2085 id_anti_id_dict = {}):
2086 """Check if process already checked, if so return True, otherwise add
2087 process and antiprocess to sorted_ids."""
2088
2089
2090 if id_anti_id_dict:
2091 is_ids = [id_anti_id_dict[id] for id in \
2092 is_ids]
2093 else:
2094 is_ids = [model.get_particle(id).get_anti_pdg_code() for id in \
2095 is_ids]
2096
2097 ids = array.array('i', sorted(is_ids + list(fs_ids)) + \
2098 [process.get('id')])
2099
2100 if ids in sorted_ids:
2101
2102 return True
2103
2104
2105 sorted_ids.append(ids)
2106
2107
2108 return False
2109
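# --- Illustration (not part of the original module) --------------------------
# check_already_checked above builds a canonical integer signature for a
# process (anti-particles of the initial state, the final-state ids and the
# process id, all sorted together) so that crossings of the same process map
# to the same key.  A minimal sketch of that idea, with plain tuples instead
# of array.array:
def _crossing_key(is_ids, fs_ids, proc_id, anti):
    """anti: callable mapping a pdg code to its anti-particle pdg code."""
    return tuple(sorted([anti(pdg) for pdg in is_ids] + list(fs_ids)) +
                 [proc_id])
# e.g. with anti = lambda pdg: -pdg, the orderings u u~ > g (ids (2, -2) and
# (21,)) and u~ u > g produce the same key and would only be checked once.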
2115     """ Generate a loop matrix element from the process definition, and return
2116     it along with the timing information dictionary.
2117     If reuse is True, an existing output directory is reused when found."""
2118
2119 assert isinstance(process_definition,base_objects.ProcessDefinition)
2120 assert process_definition.get('perturbation_couplings')!=[]
2121
2122 if not output_path is None:
2123 root_path = output_path
2124 else:
2125 root_path = cmd._mgme_dir
2126
2127 timing = {'Diagrams_generation': None,
2128 'n_loops': None,
2129 'HelasDiagrams_generation': None,
2130 'n_loop_groups': None,
2131 'n_loop_wfs': None,
2132 'loop_wfs_ranks': None}
2133
2134 if any(len(l.get('ids'))>1 for l in process_definition.get('legs')):
2135 raise InvalidCmd("This check can only be performed on single "+
2136                          "processes (i.e. without multiparticle labels).")
2137
2138 isids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2139 if not leg.get('state')]
2140 fsids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2141 if leg.get('state')]
2142
2143
2144 process = process_definition.get_process(isids,fsids)
2145
2146 proc_dir = pjoin(root_path,"SAVED"+temp_dir_prefix+"_%s"%(
2147 '_'.join(process.shell_string().split('_')[1:])))
2148 if reuse and os.path.isdir(proc_dir):
2149 logger.info("Reusing directory %s"%str(proc_dir))
2150
2151 return timing, process
2152
2153 logger.info("Generating p%s"%process_definition.nice_string()[1:])
2154
2155 start=time.time()
2156 amplitude = loop_diagram_generation.LoopAmplitude(process)
2157
2158
2159 loop_optimized_output = cmd.options['loop_optimized_output']
2160 if not amplitude.get('process').get('has_born'):
2161 loop_optimized_output = False
2162 timing['Diagrams_generation']=time.time()-start
2163 timing['n_loops']=len(amplitude.get('loop_diagrams'))
2164 start=time.time()
2165
2166 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2167 optimized_output = loop_optimized_output,gen_color=True)
2168
2169
2170
2171 matrix_element.compute_all_analytic_information()
2172 timing['HelasDiagrams_generation']=time.time()-start
2173
2174 if loop_optimized_output:
2175 timing['n_loop_groups']=len(matrix_element.get('loop_groups'))
2176 lwfs=[l for ldiag in matrix_element.get_loop_diagrams() for l in \
2177 ldiag.get('loop_wavefunctions')]
2178 timing['n_loop_wfs']=len(lwfs)
2179 timing['loop_wfs_ranks']=[]
2180 for rank in range(0,max([l.get_analytic_info('wavefunction_rank') \
2181 for l in lwfs])+1):
2182 timing['loop_wfs_ranks'].append(\
2183 len([1 for l in lwfs if \
2184 l.get_analytic_info('wavefunction_rank')==rank]))
2185
2186 return timing, matrix_element
2187
2188
2189
2190
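# --- Illustration (not part of the original module) --------------------------
# The 'loop_wfs_ranks' entry filled above is a simple histogram: entry r
# counts how many loop wavefunctions have rank r.  Equivalent stand-alone
# sketch (the ranks in the usage comment are made up):
def _rank_histogram(ranks):
    """Return a list whose r-th entry counts the occurrences of rank r."""
    return [len([1 for r in ranks if r == rank])
            for rank in range(max(ranks) + 1)]
# e.g. _rank_histogram([0, 1, 1, 2]) == [1, 2, 1]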
2191 def check_profile(process_definition, param_card = None,cuttools="",tir={},
2192 options = {}, cmd = FakeInterface(),output_path=None,MLOptions={}):
2193 """For a single loop process, check both its timings and then its stability
2194 in one go without regenerating it."""
2195
2196 if 'reuse' not in options:
2197 keep_folder=False
2198 else:
2199 keep_folder = options['reuse']
2200
2201 model=process_definition.get('model')
2202
2203 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2204 keep_folder,output_path=output_path,cmd=cmd)
2205 reusing = isinstance(matrix_element, base_objects.Process)
2206 options['reuse'] = reusing
2207 myProfiler = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2208 model=model, output_path=output_path, cmd=cmd)
2209
2210 if not reusing and not matrix_element.get('processes')[0].get('has_born'):
2211 myProfiler.loop_optimized_output=False
2212 if not myProfiler.loop_optimized_output:
2213 MLoptions={}
2214 else:
2215 MLoptions=MLOptions
2216 timing2 = myProfiler.time_matrix_element(matrix_element, reusing,
2217 param_card, keep_folder=keep_folder,options=options,
2218 MLOptions = MLoptions)
2219
2220 if timing2 == None:
2221 return None, None
2222
2223
2224 timing = dict(timing1.items()+timing2.items())
2225 stability = myProfiler.check_matrix_element_stability(matrix_element,
2226 options=options, infos_IN=timing,param_card=param_card,
2227 keep_folder = keep_folder,
2228 MLOptions = MLoptions)
2229 if stability == None:
2230 return None, None
2231 else:
2232 timing['loop_optimized_output']=myProfiler.loop_optimized_output
2233 stability['loop_optimized_output']=myProfiler.loop_optimized_output
2234 return timing, stability
2235
2236
2237
2238
2239 def check_stability(process_definition, param_card = None,cuttools="",tir={},
2240 options=None,nPoints=100, output_path=None,
2241 cmd = FakeInterface(), MLOptions = {}):
2242     """For a single loop process, check the numerical stability of its loop
2243     matrix element over many phase-space points."""
2244
2245 if "reuse" in options:
2246 reuse=options['reuse']
2247 else:
2248 reuse=False
2249
2251 keep_folder = reuse
2252 model=process_definition.get('model')
2253
2254
2255 timing, matrix_element = generate_loop_matrix_element(process_definition,
2256 reuse, output_path=output_path, cmd=cmd)
2257 reusing = isinstance(matrix_element, base_objects.Process)
2258 options['reuse'] = reusing
2259 myStabilityChecker = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2260 output_path=output_path,model=model,cmd=cmd)
2261 if not reusing and not matrix_element.get('processes')[0].get('has_born'):
2262 myStabilityChecker.loop_optimized_output=False
2263 if not myStabilityChecker.loop_optimized_output:
2264 MLoptions = {}
2265 else:
2266 MLoptions = MLOptions
2267     if "MLReductionLib" not in MLoptions:
2268 MLoptions["MLReductionLib"] = []
2269 if cuttools:
2270 MLoptions["MLReductionLib"].extend([1])
2271 if "iregi_dir" in tir:
2272 MLoptions["MLReductionLib"].extend([3])
2273 if "pjfry_dir" in tir:
2274 MLoptions["MLReductionLib"].extend([2])
2275 if "golem_dir" in tir:
2276 MLoptions["MLReductionLib"].extend([4])
2277
2278 stability = myStabilityChecker.check_matrix_element_stability(matrix_element,
2279 options=options,param_card=param_card,
2280 keep_folder=keep_folder,
2281 MLOptions=MLoptions)
2282
2283 if stability == None:
2284 return None
2285 else:
2286 stability['loop_optimized_output']=myStabilityChecker.loop_optimized_output
2287 return stability
2288
2289
2290
2291
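# --- Illustration (not part of the original module) --------------------------
# In check_stability above, the integers appended to
# MLoptions["MLReductionLib"] select the reduction tools MadLoop should try.
# The mapping inferred from the branches above is 1 -> CutTools, 2 -> PJFry,
# 3 -> IREGI and 4 -> Golem.  Small sketch of building that list from the
# available tool directories:
def _reduction_codes(cuttools, tir):
    codes = []
    if cuttools:
        codes.append(1)           # CutTools
    if "iregi_dir" in tir:
        codes.append(3)           # IREGI
    if "pjfry_dir" in tir:
        codes.append(2)           # PJFry
    if "golem_dir" in tir:
        codes.append(4)           # Golem
    return codes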
2292 def check_timing(process_definition, param_card= None, cuttools="",tir={},
2293 output_path=None, options={}, cmd = FakeInterface(),
2294 MLOptions = {}):
2295 """For a single loop process, give a detailed summary of the generation and
2296 execution timing."""
2297
2298 if 'reuse' not in options:
2299 keep_folder = False
2300 else:
2301 keep_folder = options['reuse']
2302 model=process_definition.get('model')
2303 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2304 keep_folder, output_path=output_path, cmd=cmd)
2305 reusing = isinstance(matrix_element, base_objects.Process)
2306 options['reuse'] = reusing
2307 myTimer = LoopMatrixElementTimer(cuttools_dir=cuttools,model=model,tir_dir=tir,
2308 output_path=output_path, cmd=cmd)
2309 if not reusing and not matrix_element.get('processes')[0].get('has_born'):
2310 myTimer.loop_optimized_output=False
2311 if not myTimer.loop_optimized_output:
2312 MLoptions = {}
2313 else:
2314 MLoptions = MLOptions
2315 timing2 = myTimer.time_matrix_element(matrix_element, reusing, param_card,
2316 keep_folder = keep_folder, options=options,
2317 MLOptions = MLoptions)
2318
2319 if timing2 == None:
2320 return None
2321 else:
2322
2323 res = dict(timing1.items()+timing2.items())
2324 res['loop_optimized_output']=myTimer.loop_optimized_output
2325 return res
2326
2327
2328
2329
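# --- Illustration (not part of the original module) --------------------------
# check_timing and check_profile above merge the generation-time and run-time
# dictionaries with dict(timing1.items() + timing2.items()), the Python 2
# spelling of a dictionary merge in which timing2 wins on duplicate keys.
# A spelling that also works on Python 3:
def _merge_timings(timing1, timing2):
    merged = dict(timing1)
    merged.update(timing2)
    return merged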
2330 def check_processes(processes, param_card = None, quick = [],cuttools="",tir={},
2331 options=None, reuse = False, output_path=None, cmd = FakeInterface()):
2332 """Check processes by generating them with all possible orderings
2333 of particles (which means different diagram building and Helas
2334 calls), and comparing the resulting matrix element values."""
2335
2336 cmass_scheme = cmd.options['complex_mass_scheme']
2337 if isinstance(processes, base_objects.ProcessDefinition):
2338
2339
2340 multiprocess = processes
2341 model = multiprocess.get('model')
2342
2343
2344 if multiprocess.get('perturbation_couplings')==[]:
2345 evaluator = MatrixElementEvaluator(model,
2346 auth_skipping = True, reuse = False, cmd = cmd)
2347 else:
2348 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
2349 model=model, auth_skipping = True,
2350 reuse = False, output_path=output_path, cmd = cmd)
2351
2352 results = run_multiprocs_no_crossings(check_process,
2353 multiprocess,
2354 evaluator,
2355 quick,
2356 options)
2357
2358 if "used_lorentz" not in evaluator.stored_quantities:
2359 evaluator.stored_quantities["used_lorentz"] = []
2360
2361 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
2362
2363 clean_up(output_path)
2364
2365 return results, evaluator.stored_quantities["used_lorentz"]
2366
2367 elif isinstance(processes, base_objects.Process):
2368 processes = base_objects.ProcessList([processes])
2369 elif isinstance(processes, base_objects.ProcessList):
2370 pass
2371 else:
2372 raise InvalidCmd("processes is of non-supported format")
2373
2374 if not processes:
2375 raise InvalidCmd("No processes given")
2376
2377 model = processes[0].get('model')
2378
2379
2380 if processes[0].get('perturbation_couplings')==[]:
2381 evaluator = MatrixElementEvaluator(model, param_card,
2382 auth_skipping = True, reuse = False, cmd = cmd)
2383 else:
2384 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
2385 model=model,param_card=param_card,
2386 auth_skipping = True, reuse = False,
2387 output_path=output_path, cmd = cmd)
2388
2389
2390
2391 sorted_ids = []
2392 comparison_results = []
2393
2394
2395 for process in processes:
2396
2397
2398 if check_already_checked([l.get('id') for l in process.get('legs') if \
2399 not l.get('state')],
2400 [l.get('id') for l in process.get('legs') if \
2401 l.get('state')],
2402 sorted_ids, process, model):
2403 continue
2404
2405 res = check_process(process, evaluator, quick, options)
2406 if res:
2407 comparison_results.append(res)
2408
2409 if "used_lorentz" not in evaluator.stored_quantities:
2410 evaluator.stored_quantities["used_lorentz"] = []
2411
2412 if processes[0].get('perturbation_couplings')!=[] and not reuse:
2413
2414 clean_up(output_path)
2415
2416 return comparison_results, evaluator.stored_quantities["used_lorentz"]
2417
2419     """Check the helas calls for a process by generating the process
2420     using all different permutations of the process legs (or, if
2421     quick, only a subset of permutations), and checking that the matrix
2422     element is invariant under these reorderings."""
2423
2424 model = process.get('model')
2425
2426
2427 for i, leg in enumerate(process.get('legs')):
2428 leg.set('number', i+1)
2429
2430 logger.info("Checking crossings of %s" % \
2431 process.nice_string().replace('Process:', 'process'))
2432
2433 process_matrix_elements = []
2434
2435
2436
2437 if quick:
2438 leg_positions = [[] for leg in process.get('legs')]
2439 quick = range(1,len(process.get('legs')) + 1)
2440
2441 values = []
2442
2443
2444 number_checked=0
2445 for legs in itertools.permutations(process.get('legs')):
2446
2447 order = [l.get('number') for l in legs]
2448 if quick:
2449 found_leg = True
2450 for num in quick:
2451
2452
2453 leg_position = legs.index([l for l in legs if \
2454 l.get('number') == num][0])
2455
2456 if not leg_position in leg_positions[num-1]:
2457 found_leg = False
2458 leg_positions[num-1].append(leg_position)
2459
2460 if found_leg:
2461 continue
2462
2463
2464
2465 if quick and process.get('perturbation_couplings') and number_checked >3:
2466 continue
2467
2468 legs = base_objects.LegList(legs)
2469
2470 if order != range(1,len(legs) + 1):
2471 logger.info("Testing permutation: %s" % \
2472 order)
2473
2474 newproc = copy.copy(process)
2475 newproc.set('legs',legs)
2476
2477
2478 try:
2479 if newproc.get('perturbation_couplings')==[]:
2480 amplitude = diagram_generation.Amplitude(newproc)
2481 else:
2482
2483 loop_base_objects.cutting_method = 'optimal' if \
2484 number_checked%2 == 0 else 'default'
2485 amplitude = loop_diagram_generation.LoopAmplitude(newproc)
2486 if not amplitude.get('process').get('has_born'):
2487 evaluator.loop_optimized_output = False
2488 except InvalidCmd:
2489 result=False
2490 else:
2491 result = amplitude.get('diagrams')
2492
2493 loop_base_objects.cutting_method = 'optimal'
2494
2495 if not result:
2496
2497 logging.info("No diagrams for %s" % \
2498 process.nice_string().replace('Process', 'process'))
2499 break
2500
2501 if order == range(1,len(legs) + 1):
2502
2503 p, w_rambo = evaluator.get_momenta(process, options)
2504
2505
2506 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
2507 matrix_element = helas_objects.HelasMatrixElement(amplitude,
2508 gen_color=False)
2509 else:
2510 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2511 optimized_output=evaluator.loop_optimized_output)
2512
2513
2514
2515
2516 if amplitude.get('process').get('has_born'):
2517
2518
2519 if matrix_element in process_matrix_elements:
2520
2521
2522 continue
2523
2524 process_matrix_elements.append(matrix_element)
2525
2526 res = evaluator.evaluate_matrix_element(matrix_element, p = p,
2527 options=options)
2528 if res == None:
2529 break
2530
2531 values.append(res[0])
2532 number_checked += 1
2533
2534
2535
2536 if abs(max(values)) + abs(min(values)) > 0 and \
2537 2 * abs(max(values) - min(values)) / \
2538 (abs(max(values)) + abs(min(values))) > 0.01:
2539 break
2540
2541
2542 if not values:
2543 return None
2544
2545
2546
2547 diff = 0
2548 if abs(max(values)) + abs(min(values)) > 0:
2549 diff = 2* abs(max(values) - min(values)) / \
2550 (abs(max(values)) + abs(min(values)))
2551
2552
2553 if process.get('perturbation_couplings'):
2554 passed = diff < 1.e-5
2555 else:
2556 passed = diff < 1.e-8
2557
2558 return {"process": process,
2559 "momenta": p,
2560 "values": values,
2561 "difference": diff,
2562 "passed": passed}
2563
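# --- Illustration (not part of the original module) --------------------------
# check_process above declares the permutation test passed when the relative
# spread of the evaluated matrix elements,
#     diff = 2 * |max - min| / (|max| + |min|),
# stays below 1e-5 for loop processes and below 1e-8 for tree-level ones.
# Stand-alone sketch of that criterion:
def _permutation_passed(values, is_loop):
    if abs(max(values)) + abs(min(values)) == 0:
        return True
    diff = 2.0 * abs(max(values) - min(values)) / \
                 (abs(max(values)) + abs(min(values)))
    return diff < (1.0e-5 if is_loop else 1.0e-8)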
2565     """Clean up the possible left-over outputs from 'evaluate_matrix_element' of
2566     the LoopMatrixElementEvaluator (when its argument proliferate is set to True)."""
2567
2568     if mg_root is None:
2569         return
2570
2571 directories = glob.glob(pjoin(mg_root, '%s*'%temp_dir_prefix))
2572 if directories != []:
2573 logger.debug("Cleaning temporary %s* check runs."%temp_dir_prefix)
2574 for dir in directories:
2575
2576 if os.path.isdir(pjoin(dir,'SubProcesses')):
2577 shutil.rmtree(dir)
2578
2587
2588 def output_profile(myprocdef, stability, timing, output_path, reusing=False):
2589 """Present the results from a timing and stability consecutive check"""
2590
2591 opt = timing['loop_optimized_output']
2592
2593 text = 'Timing result for the '+('optimized' if opt else 'default')+\
2594 ' output:\n'
2595 text += output_timings(myprocdef,timing)
2596
2597 text += '\nStability result for the '+('optimized' if opt else 'default')+\
2598 ' output:\n'
2599 text += output_stability(stability,output_path, reusing=reusing)
2600
2601 mode = 'optimized' if opt else 'default'
2602 logFilePath = pjoin(output_path, 'profile_%s_%s.log'\
2603 %(mode,stability['Process'].shell_string()))
2604 logFile = open(logFilePath, 'w')
2605 logFile.write(text)
2606 logFile.close()
2607 logger.info('Log of this profile check was output to file %s'\
2608 %str(logFilePath))
2609 return text
2610
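# --- Illustration (not part of the original module) --------------------------
# The helpers defined inside output_stability below compare several
# evaluations of the same phase-space point through
#     accuracy(evals)      = 2 * (max - min) / |max + min|
#     best_estimate(evals) = (max + min) / 2
# A quick stand-alone check of those two definitions:
def _demo_accuracy(eval_list):
    return 2.0 * (max(eval_list) - min(eval_list)) / \
           abs(max(eval_list) + min(eval_list))

def _demo_best_estimate(eval_list):
    return (max(eval_list) + min(eval_list)) / 2.0
# e.g. for evaluations [0.99, 1.00, 1.01]:
#     _demo_accuracy(...) == 0.02 and _demo_best_estimate(...) == 1.00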
2612     """Present the result of a stability check in a nice format.
2613     The full info is printed out in 'stability_<mode>_<proc_shell_string>.log'
2614     under the MadGraph5_aMC@NLO root folder (output_path)."""
2615
2616 def accuracy(eval_list):
2617 """ Compute the accuracy from different evaluations."""
2618 return (2.0*(max(eval_list)-min(eval_list))/
2619 abs(max(eval_list)+min(eval_list)))
2620
2621 def best_estimate(eval_list):
2622 """ Returns the best estimate from different evaluations."""
2623 return (max(eval_list)+min(eval_list))/2.0
2624
2625 def loop_direction_test_power(eval_list):
2626         """ Computes the loop direction test power P as follows:
2627             P = accuracy(loop_dir_test) / accuracy(all_test)
2628         so that P is large if the loop direction test is effective.
2629         The tuple returned is (log(median(P)), log(min(P)), frac)
2630         where frac is the fraction of events with log10(P) smaller than -3,
2631         i.e. events for which the loop direction test claims an accuracy
2632         three digits better than it really is according to the other
2633         tests."""
2634 powers=[]
2635 for eval in eval_list:
2636 loop_dir_evals = [eval['CTModeA'],eval['CTModeB']]
2637
2638 other_evals = [eval[key] for key in eval.keys() if key not in \
2639 ['CTModeB','Accuracy']]
2640 if accuracy(other_evals)!=0.0 and accuracy(loop_dir_evals)!=0.0:
2641 powers.append(accuracy(loop_dir_evals)/accuracy(other_evals))
2642
2643 n_fail=0
2644 for p in powers:
2645 if (math.log(p)/math.log(10))<-3:
2646 n_fail+=1
2647
2648 if len(powers)==0:
2649 return (None,None,None)
2650
2651 return (math.log(median(powers))/math.log(10),
2652 math.log(min(powers))/math.log(10),
2653 n_fail/len(powers))
2654
2655 def test_consistency(dp_eval_list, qp_eval_list):
2656 """ Computes the consistency test C from the DP and QP evaluations.
2657 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2658 So a consistent test would have C as close to one as possible.
2659 The tuple returned is (log(median(C)),log(min(C)),log(max(C)))"""
2660 consistencies = []
2661 for dp_eval, qp_eval in zip(dp_eval_list,qp_eval_list):
2662 dp_evals = [dp_eval[key] for key in dp_eval.keys() \
2663 if key!='Accuracy']
2664 qp_evals = [qp_eval[key] for key in qp_eval.keys() \
2665 if key!='Accuracy']
2666 if (abs(best_estimate(qp_evals)-best_estimate(dp_evals)))!=0.0 and \
2667 accuracy(dp_evals)!=0.0:
2668 consistencies.append(accuracy(dp_evals)/(abs(\
2669 best_estimate(qp_evals)-best_estimate(dp_evals))))
2670
2671 if len(consistencies)==0:
2672 return (None,None,None)
2673
2674 return (math.log(median(consistencies))/math.log(10),
2675 math.log(min(consistencies))/math.log(10),
2676 math.log(max(consistencies))/math.log(10))
2677
2678 def median(orig_list):
2679         """ Find the median of a list of floats (a sorted copy is made internally). """
2680 list=copy.copy(orig_list)
2681 list.sort()
2682 if len(list)%2==0:
2683 return (list[int((len(list)/2)-1)]+list[int(len(list)/2)])/2.0
2684 else:
2685 return list[int((len(list)-1)/2)]
2686
2687
2688 f = format_output
2689
2690 opt = stability['loop_optimized_output']
2691
2692 mode = 'optimized' if opt else 'default'
2693 process = stability['Process']
2694 res_str = "Stability checking for %s (%s mode)\n"\
2695 %(process.nice_string()[9:],mode)
2696
2697 logFile = open(pjoin(output_path, 'stability_%s_%s.log'\
2698 %(mode,process.shell_string())), 'w')
2699
2700 logFile.write('Stability check results\n\n')
2701 logFile.write(res_str)
2702 data_plot_dict={}
2703 accuracy_dict={}
2704 nPSmax=0
2705 max_acc=0.0
2706 min_acc=1.0
2707 if stability['Stability']:
2708 toolnames= stability['Stability'].keys()
2709 toolnamestr=" | ".join(tn+
2710 ''.join([' ']*(10-len(tn))) for tn in toolnames)
2711 DP_stability = [[eval['Accuracy'] for eval in stab['DP_stability']] \
2712 for key,stab in stability['Stability'].items()]
2713 med_dp_stab_str=" | ".join([f(median(dp_stab),'%.2e ') for dp_stab in DP_stability])
2714 min_dp_stab_str=" | ".join([f(min(dp_stab),'%.2e ') for dp_stab in DP_stability])
2715 max_dp_stab_str=" | ".join([f(max(dp_stab),'%.2e ') for dp_stab in DP_stability])
2716 UPS = [stab['Unstable_PS_points'] for key,stab in stability['Stability'].items()]
2717 res_str_i = "\n= Tool (DoublePrec for CT)....... %s\n"%toolnamestr
2718 len_PS=["%i"%len(evals)+\
2719 ''.join([' ']*(10-len("%i"%len(evals)))) for evals in DP_stability]
2720 len_PS_str=" | ".join(len_PS)
2721 res_str_i += "|= Number of PS points considered %s\n"%len_PS_str
2722 res_str_i += "|= Median accuracy............... %s\n"%med_dp_stab_str
2723 res_str_i += "|= Max accuracy.................. %s\n"%min_dp_stab_str
2724 res_str_i += "|= Min accuracy.................. %s\n"%max_dp_stab_str
2725 pmedminlist=[]
2726 pfraclist=[]
2727 for key,stab in stability['Stability'].items():
2728 (pmed,pmin,pfrac)=loop_direction_test_power(stab['DP_stability'])
2729 ldtest_str = "%s,%s"%(f(pmed,'%.1f'),f(pmin,'%.1f'))
2730 pfrac_str = f(pfrac,'%.2e')
2731 pmedminlist.append(ldtest_str+''.join([' ']*(10-len(ldtest_str))))
2732 pfraclist.append(pfrac_str+''.join([' ']*(10-len(pfrac_str))))
2733 pmedminlist_str=" | ".join(pmedminlist)
2734 pfraclist_str=" | ".join(pfraclist)
2735 res_str_i += "|= Overall DP loop_dir test power %s\n"%pmedminlist_str
2736 res_str_i += "|= Fraction of evts with power<-3 %s\n"%pfraclist_str
2737 len_UPS=["%i"%len(upup)+\
2738 ''.join([' ']*(10-len("%i"%len(upup)))) for upup in UPS]
2739 len_UPS_str=" | ".join(len_UPS)
2740 res_str_i += "|= Number of Unstable PS points %s\n"%len_UPS_str
2741 res_str_i += \
2742 """
2743 = Legend for the statistics of the stability tests. (all logs below are log_10)
2744   The loop direction test power P is computed as follows:
2745 P = accuracy(loop_dir_test) / accuracy(all_other_test)
2746 So that log(P) is positive if the loop direction test is effective.
2747 The tuple printed out is (log(median(P)),log(min(P)))
2748 The consistency test C is computed when QP evaluations are available:
2749 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2750 So a consistent test would have log(C) as close to zero as possible.
2751 The tuple printed out is (log(median(C)),log(min(C)),log(max(C)))\n"""
2752 res_str+=res_str_i
2753 for key in stability['Stability'].keys():
2754 toolname=key
2755 stab=stability['Stability'][key]
2756 DP_stability = [eval['Accuracy'] for eval in stab['DP_stability']]
2757
2758 QP_stability = [eval['Accuracy'] if eval!={} else -1.0 for eval in \
2759 stab['QP_stability']]
2760 nPS = len(DP_stability)
2761 if nPS>nPSmax:nPSmax=nPS
2762 UPS = stab['Unstable_PS_points']
2763 UPS_stability_DP = [DP_stability[U[0]] for U in UPS]
2764 UPS_stability_QP = [QP_stability[U[0]] for U in UPS]
2765 EPS = stab['Exceptional_PS_points']
2766 EPS_stability_DP = [DP_stability[E[0]] for E in EPS]
2767 EPS_stability_QP = [QP_stability[E[0]] for E in EPS]
2768 res_str_i = ""
2769
2770 if len(UPS)>0:
2771 res_str_i = "\nDetails of the %d/%d UPS encountered by %s\n"\
2772 %(len(UPS),nPS,toolname)
2773 prefix = 'DP' if toolname=='CutTools' else ''
2774 res_str_i += "|= %s Median inaccuracy.......... %s\n"\
2775 %(prefix,f(median(UPS_stability_DP),'%.2e'))
2776 res_str_i += "|= %s Max accuracy............... %s\n"\
2777 %(prefix,f(min(UPS_stability_DP),'%.2e'))
2778 res_str_i += "|= %s Min accuracy............... %s\n"\
2779 %(prefix,f(max(UPS_stability_DP),'%.2e'))
2780 (pmed,pmin,pfrac)=loop_direction_test_power(\
2781 [stab['DP_stability'][U[0]] for U in UPS])
2782 if toolname=='CutTools':
2783 res_str_i += "|= UPS DP loop_dir test power.... %s,%s\n"\
2784 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2785 res_str_i += "|= UPS DP fraction with power<-3. %s\n"\
2786 %f(pfrac,'%.2e')
2787 res_str_i += "|= QP Median accuracy............ %s\n"\
2788 %f(median(UPS_stability_QP),'%.2e')
2789 res_str_i += "|= QP Max accuracy............... %s\n"\
2790 %f(min(UPS_stability_QP),'%.2e')
2791 res_str_i += "|= QP Min accuracy............... %s\n"\
2792 %f(max(UPS_stability_QP),'%.2e')
2793 (pmed,pmin,pfrac)=loop_direction_test_power(\
2794 [stab['QP_stability'][U[0]] for U in UPS])
2795 res_str_i += "|= UPS QP loop_dir test power.... %s,%s\n"\
2796 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2797 res_str_i += "|= UPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2798 (pmed,pmin,pmax)=test_consistency(\
2799 [stab['DP_stability'][U[0]] for U in UPS],
2800 [stab['QP_stability'][U[0]] for U in UPS])
2801 res_str_i += "|= DP vs QP stab test consistency %s,%s,%s\n"\
2802 %(f(pmed,'%.1f'),f(pmin,'%.1f'),f(pmax,'%.1f'))
2803 if len(EPS)==0:
2804 res_str_i += "= Number of Exceptional PS points : 0\n"
2805 if len(EPS)>0:
2806             res_str_i += "\nDetails of the %d/%d EPS encountered by %s\n"\
2807 %(len(EPS),nPS,toolname)
2808 res_str_i += "|= DP Median accuracy............ %s\n"\
2809 %f(median(EPS_stability_DP),'%.2e')
2810 res_str_i += "|= DP Max accuracy............... %s\n"\
2811 %f(min(EPS_stability_DP),'%.2e')
2812 res_str_i += "|= DP Min accuracy............... %s\n"\
2813 %f(max(EPS_stability_DP),'%.2e')
2814 pmed,pmin,pfrac=loop_direction_test_power(\
2815 [stab['DP_stability'][E[0]] for E in EPS])
2816 res_str_i += "|= EPS DP loop_dir test power.... %s,%s\n"\
2817 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2818 res_str_i += "|= EPS DP fraction with power<-3. %s\n"\
2819 %f(pfrac,'%.2e')
2820 res_str_i += "|= QP Median accuracy............ %s\n"\
2821 %f(median(EPS_stability_QP),'%.2e')
2822 res_str_i += "|= QP Max accuracy............... %s\n"\
2823 %f(min(EPS_stability_QP),'%.2e')
2824 res_str_i += "|= QP Min accuracy............... %s\n"\
2825 %f(max(EPS_stability_QP),'%.2e')
2826 pmed,pmin,pfrac=loop_direction_test_power(\
2827 [stab['QP_stability'][E[0]] for E in EPS])
2828 res_str_i += "|= EPS QP loop_dir test power.... %s,%s\n"\
2829 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2830 res_str_i += "|= EPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2831
2832 logFile.write(res_str_i)
2833
2834 if len(EPS)>0:
2835 logFile.write('\nFull details of the %i EPS encountered by %s.\n'\
2836 %(len(EPS),toolname))
2837 for i, eps in enumerate(EPS):
2838 logFile.write('\nEPS #%i\n'%(i+1))
2839 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2840 for p in eps[1]]))
2841 logFile.write('\n DP accuracy : %.3e\n'%DP_stability[eps[0]])
2842 logFile.write(' QP accuracy : %.3e\n'%QP_stability[eps[0]])
2843 if len(UPS)>0:
2844 logFile.write('\nFull details of the %i UPS encountered by %s.\n'\
2845 %(len(UPS),toolname))
2846 for i, ups in enumerate(UPS):
2847 logFile.write('\nUPS #%i\n'%(i+1))
2848 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2849 for p in ups[1]]))
2850 logFile.write('\n DP accuracy : %.3e\n'%DP_stability[ups[0]])
2851 logFile.write(' QP accuracy : %.3e\n'%QP_stability[ups[0]])
2852
2853 logFile.write('\nData entries for the stability plot.\n')
2854         logFile.write('First column is a maximal accuracy delta, second is the '+\
2855                       'fraction of events with DP accuracy worse than delta.\n\n')
2856
2857 if max(DP_stability)>0.0:
2858 min_digit_acc=int(math.log(max(DP_stability))/math.log(10))
2859 if min_digit_acc>=0:
2860 min_digit_acc = min_digit_acc+1
2861 accuracies=[10**(-17+(i/5.0)) for i in range(5*(17+min_digit_acc)+1)]
2862 else:
2863 res_str_i += '\nPerfect accuracy over all the trial PS points. No plot'+\
2864 ' is output then.'
2865 logFile.write('Perfect accuracy over all the trial PS points.')
2866 res_str +=res_str_i
2867 continue
2868
2869 accuracy_dict[toolname]=accuracies
2870 if max(accuracies) > max_acc: max_acc=max(accuracies)
2871 if min(accuracies) < min_acc: min_acc=min(accuracies)
2872 data_plot=[]
2873 for acc in accuracies:
2874 data_plot.append(float(len([d for d in DP_stability if d>acc]))\
2875 /float(len(DP_stability)))
2876 data_plot_dict[toolname]=data_plot
2877
2878 logFile.writelines('%.3e %.3e\n'%(accuracies[i], data_plot[i]) for i in \
2879 range(len(accuracies)))
2880 logFile.write('\nList of accuracies recorded for the %i evaluations with %s\n'\
2881 %(nPS,toolname))
2882         logFile.write('First column is DP, second is QP (if available).\n\n')
2883 logFile.writelines('%.3e '%DP_stability[i]+('NA\n' if QP_stability[i]==-1.0 \
2884 else '%.3e\n'%QP_stability[i]) for i in range(nPS))
2885 res_str+=res_str_i
2886 logFile.close()
2887 res_str += "\n= Stability details of the run are output to the file"+\
2888 " stability_%s_%s.log\n"%(mode,process.shell_string())
2889
2890
2891
2892
2893 if any(isinstance(handler,logging.FileHandler) for handler in \
2894 logging.getLogger('madgraph').handlers):
2895 return res_str
2896
2897 try:
2898 import matplotlib.pyplot as plt
2899 colorlist=['b','r','g','y']
2900 for i,key in enumerate(data_plot_dict.keys()):
2901 color=colorlist[i]
2902 data_plot=data_plot_dict[key]
2903 accuracies=accuracy_dict[key]
2904 plt.plot(accuracies, data_plot, color=color, marker='', linestyle='-',\
2905 label=key)
2906 plt.axis([min_acc,max_acc,\
2907 10**(-int(math.log(nPSmax-0.5)/math.log(10))-1), 1])
2908 plt.yscale('log')
2909 plt.xscale('log')
2910 plt.title('Stability plot for %s (%s mode, %d points)'%\
2911 (process.nice_string()[9:],mode,nPSmax))
2912 plt.ylabel('Fraction of events')
2913 plt.xlabel('Maximal precision')
2914 plt.legend()
2915 if not reusing:
2916 logger.info('Some stability statistics will be displayed once you '+\
2917 'close the plot window')
2918 plt.show()
2919 else:
2920 fig_output_file = str(pjoin(output_path,
2921 'stability_plot_%s_%s.png'%(mode,process.shell_string())))
2922 logger.info('Stability plot output to file %s. '%fig_output_file)
2923 plt.savefig(fig_output_file)
2924 return res_str
2925 except Exception as e:
2926 if isinstance(e, ImportError):
2927 res_str += "\n= Install matplotlib to get a "+\
2928 "graphical display of the results of this check."
2929 else:
2930 res_str += "\n= Could not produce the stability plot because of "+\
2931 "the following error: %s"%str(e)
2932 return res_str
2933
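# --- Illustration (not part of the original module) --------------------------
# The stability plot data written by output_stability above is a survival
# curve: for each accuracy threshold delta it records the fraction of
# phase-space points whose double-precision accuracy is worse than delta.
# Stand-alone sketch of that computation (the numbers in the usage comment
# are made up):
def _fraction_worse_than(accuracies, thresholds):
    n = float(len(accuracies))
    return [len([a for a in accuracies if a > delta]) / n
            for delta in thresholds]
# e.g. _fraction_worse_than([1e-9, 1e-6, 1e-3], [1e-8, 1e-4]) == [2/3., 1/3.]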
2935 """Present the result of a timings check in a nice format """
2936
2937
2938 f = format_output
2939 loop_optimized_output = timings['loop_optimized_output']
2940
2941 res_str = "%s \n"%process.nice_string()
2942 try:
2943 gen_total = timings['HELAS_MODEL_compilation']+\
2944 timings['HelasDiagrams_generation']+\
2945 timings['Process_output']+\
2946 timings['Diagrams_generation']+\
2947 timings['Process_compilation']+\
2948 timings['Initialization']
2949 except TypeError:
2950 gen_total = None
2951 res_str += "\n= Generation time total...... ========== %s\n"%f(gen_total,'%.3gs')
2952 res_str += "|= Diagrams generation....... %s\n"\
2953 %f(timings['Diagrams_generation'],'%.3gs')
2954 res_str += "|= Helas Diagrams generation. %s\n"\
2955 %f(timings['HelasDiagrams_generation'],'%.3gs')
2956 res_str += "|= Process output............ %s\n"\
2957 %f(timings['Process_output'],'%.3gs')
2958 res_str += "|= HELAS+model compilation... %s\n"\
2959 %f(timings['HELAS_MODEL_compilation'],'%.3gs')
2960 res_str += "|= Process compilation....... %s\n"\
2961 %f(timings['Process_compilation'],'%.3gs')
2962 res_str += "|= Initialization............ %s\n"\
2963 %f(timings['Initialization'],'%.3gs')
2964 res_str += "\n= Unpolarized time / PSpoint. ========== %.3gms\n"\
2965 %(timings['run_unpolarized_total']*1000.0)
2966 if loop_optimized_output:
2967 coef_time=timings['run_unpolarized_coefs']*1000.0
2968 loop_time=(timings['run_unpolarized_total']-\
2969 timings['run_unpolarized_coefs'])*1000.0
2970 total=coef_time+loop_time
2971 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2972 %(coef_time,int(round(100.0*coef_time/total)))
2973 res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
2974 %(loop_time,int(round(100.0*loop_time/total)))
2975
2976 res_str += "\n= Polarized time / PSpoint... ========== %.3gms\n"\
2977 %(timings['run_polarized_total']*1000.0)
2978 if loop_optimized_output:
2979 coef_time=timings['run_polarized_coefs']*1000.0
2980 loop_time=(timings['run_polarized_total']-\
2981 timings['run_polarized_coefs'])*1000.0
2982 total=coef_time+loop_time
2983 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2984 %(coef_time,int(round(100.0*coef_time/total)))
2985 res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
2986 %(loop_time,int(round(100.0*loop_time/total)))
2987 res_str += "\n= Miscellaneous ========================\n"
2988 res_str += "|= Number of hel. computed... %s/%s\n"\
2989 %(f(timings['n_contrib_hel'],'%d'),f(timings['n_tot_hel'],'%d'))
2990 res_str += "|= Number of loop diagrams... %s\n"%f(timings['n_loops'],'%d')
2991 if loop_optimized_output:
2992 res_str += "|= Number of loop groups..... %s\n"\
2993 %f(timings['n_loop_groups'],'%d')
2994 res_str += "|= Number of loop wfs........ %s\n"\
2995 %f(timings['n_loop_wfs'],'%d')
2996 if timings['loop_wfs_ranks']!=None:
2997 for i, r in enumerate(timings['loop_wfs_ranks']):
2998 res_str += "||= # of loop wfs of rank %d.. %d\n"%(i,r)
2999 res_str += "|= Loading time (Color data). ~%.3gms\n"\
3000 %(timings['Booting_time']*1000.0)
3001 res_str += "|= Maximum RAM usage (rss)... %s\n"\
3002 %f(float(timings['ram_usage']/1000.0),'%.3gMb')
3003 res_str += "\n= Output disk size =====================\n"
3004 res_str += "|= Source directory sources.. %s\n"%f(timings['du_source'],'%sb')
3005 res_str += "|= Process sources........... %s\n"%f(timings['du_process'],'%sb')
3006 res_str += "|= Color and helicity data... %s\n"%f(timings['du_color'],'%sb')
3007 res_str += "|= Executable size........... %s\n"%f(timings['du_exe'],'%sb')
3008
3009 return res_str
3010
3012     """Present the results of a comparison in a nice list format.
3013     In 'short' mode, return the number of failed processes instead.
3014     """
3015 proc_col_size = 17
3016 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
3017 if pert_coupl:
3018 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
3019 else:
3020 process_header = "Process"
3021
3022 if len(process_header) + 1 > proc_col_size:
3023 proc_col_size = len(process_header) + 1
3024
3025 for proc in comparison_results:
3026 if len(proc['process'].base_string()) + 1 > proc_col_size:
3027 proc_col_size = len(proc['process'].base_string()) + 1
3028
3029 col_size = 18
3030
3031 pass_proc = 0
3032 fail_proc = 0
3033 no_check_proc = 0
3034
3035 failed_proc_list = []
3036 no_check_proc_list = []
3037
3038 res_str = fixed_string_length(process_header, proc_col_size) + \
3039 fixed_string_length("Min element", col_size) + \
3040 fixed_string_length("Max element", col_size) + \
3041 fixed_string_length("Relative diff.", col_size) + \
3042 "Result"
3043
3044 for result in comparison_results:
3045 proc = result['process'].base_string()
3046 values = result['values']
3047
3048 if len(values) <= 1:
3049 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3050 " * No permutations, process not checked *"
3051 no_check_proc += 1
3052 no_check_proc_list.append(result['process'].nice_string())
3053 continue
3054
3055 passed = result['passed']
3056
3057 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3058 fixed_string_length("%1.10e" % min(values), col_size) + \
3059 fixed_string_length("%1.10e" % max(values), col_size) + \
3060 fixed_string_length("%1.10e" % result['difference'],
3061 col_size)
3062 if passed:
3063 pass_proc += 1
3064 res_str += "Passed"
3065 else:
3066 fail_proc += 1
3067 failed_proc_list.append(result['process'].nice_string())
3068 res_str += "Failed"
3069
3070 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3071 (pass_proc, pass_proc + fail_proc,
3072 fail_proc, pass_proc + fail_proc)
3073
3074 if fail_proc != 0:
3075 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3076 if no_check_proc != 0:
3077 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
3078
3079 return res_str
3080
3082 """Helper function to fix the length of a string by cutting it
3083 or adding extra space."""
3084
3085 if len(mystr) > length:
3086 return mystr[0:length]
3087 else:
3088 return mystr + " " * (length - len(mystr))
3089
3090
3091
3092
3093
3094 def check_gauge(processes, param_card = None,cuttools="", tir={}, reuse = False,
3095 options=None, output_path=None, cmd = FakeInterface()):
3096 """Check gauge invariance of the processes by using the BRS check.
3097 For one of the massless external bosons (e.g. gluon or photon),
3098 replace the polarization vector (epsilon_mu) with its momentum (p_mu)
3099 """
3100 cmass_scheme = cmd.options['complex_mass_scheme']
3101 if isinstance(processes, base_objects.ProcessDefinition):
3102
3103
3104 multiprocess = processes
3105
3106 model = multiprocess.get('model')
3107
3108 if multiprocess.get('perturbation_couplings')==[]:
3109 evaluator = MatrixElementEvaluator(model, param_card,cmd= cmd,
3110 auth_skipping = True, reuse = False)
3111 else:
3112 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3113 cmd=cmd,model=model, param_card=param_card,
3114 auth_skipping = False, reuse = False,
3115 output_path=output_path)
3116
3117 if not cmass_scheme and multiprocess.get('perturbation_couplings')==[]:
3118
3119             logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3120 for particle in evaluator.full_model.get('particles'):
3121 if particle.get('width') != 'ZERO':
3122 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3123 results = run_multiprocs_no_crossings(check_gauge_process,
3124 multiprocess,
3125 evaluator,
3126 options=options
3127 )
3128
3129 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3130
3131 clean_up(output_path)
3132
3133 return results
3134
3135 elif isinstance(processes, base_objects.Process):
3136 processes = base_objects.ProcessList([processes])
3137 elif isinstance(processes, base_objects.ProcessList):
3138 pass
3139 else:
3140 raise InvalidCmd("processes is of non-supported format")
3141
3142 assert processes, "No processes given"
3143
3144 model = processes[0].get('model')
3145
3146
3147 if processes[0].get('perturbation_couplings')==[]:
3148 evaluator = MatrixElementEvaluator(model, param_card,
3149 auth_skipping = True, reuse = False,
3150 cmd = cmd)
3151 else:
3152 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3153 model=model, param_card=param_card,
3154 auth_skipping = False, reuse = False,
3155 output_path=output_path, cmd = cmd)
3156 comparison_results = []
3157 comparison_explicit_flip = []
3158
3159
3160 for process in processes:
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170 result = check_gauge_process(process, evaluator,options=options)
3171 if result:
3172 comparison_results.append(result)
3173
3174 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3175
3176 clean_up(output_path)
3177
3178 return comparison_results
3179
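# --- Illustration (not part of the original module) --------------------------
# The gauge (Ward identity / BRS) check evaluates each matrix element twice:
# once normally and once with the polarization vector of a massless vector
# boson replaced by its momentum.  output_gauge further below flags a process
# when
#     ratio = |M_BRS|^2 / |M|^2
# exceeds the threshold (1e-5 with perturbation couplings, 1e-10 at tree
# level).  Minimal sketch of that pass/fail decision:
def _gauge_check_passed(m2_brs, m2, is_loop):
    threshold = 1.0e-5 if is_loop else 1.0e-10
    return abs(m2_brs) / abs(m2) <= threshold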
3182 """Check gauge invariance for the process, unless it is already done."""
3183
3184 model = process.get('model')
3185
3186
3187 found_gauge = False
3188 for i, leg in enumerate(process.get('legs')):
3189 part = model.get_particle(leg.get('id'))
3190 if part.get('spin') == 3 and part.get('mass').lower() == 'zero':
3191 found_gauge = True
3192 break
3193 if not found_gauge:
3194 logger.info("No ward identity for %s" % \
3195 process.nice_string().replace('Process', 'process'))
3196
3197 return None
3198
3199 for i, leg in enumerate(process.get('legs')):
3200 leg.set('number', i+1)
3201
3202 logger.info("Checking ward identities for %s" % \
3203 process.nice_string().replace('Process', 'process'))
3204
3205 legs = process.get('legs')
3206
3207
3208 try:
3209 if process.get('perturbation_couplings')==[]:
3210 amplitude = diagram_generation.Amplitude(process)
3211 else:
3212 amplitude = loop_diagram_generation.LoopAmplitude(process)
3213 if not amplitude.get('process').get('has_born'):
3214 evaluator.loop_optimized_output = False
3215 except InvalidCmd:
3216 logging.info("No diagrams for %s" % \
3217 process.nice_string().replace('Process', 'process'))
3218 return None
3219 if not amplitude.get('diagrams'):
3220
3221 logging.info("No diagrams for %s" % \
3222 process.nice_string().replace('Process', 'process'))
3223 return None
3224
3225 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3226 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3227 gen_color = False)
3228 else:
3229 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3230 optimized_output=evaluator.loop_optimized_output)
3231
3232
3233
3234
3235
3236
3237
3238 brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
3239 output='jamp', options=options)
3240
3241 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3242 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3243 gen_color = False)
3244
3245 mvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = False,
3246 output='jamp', options=options)
3247
3248 if mvalue and mvalue['m2']:
3249 return {'process':process,'value':mvalue,'brs':brsvalue}
3250
3252 """Present the results of a comparison in a nice list format"""
3253
3254 proc_col_size = 17
3255
3256 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
3257
3258
3259 if pert_coupl:
3260 threshold=1e-5
3261 else:
3262 threshold=1e-10
3263
3264 if pert_coupl:
3265 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
3266 else:
3267 process_header = "Process"
3268
3269 if len(process_header) + 1 > proc_col_size:
3270 proc_col_size = len(process_header) + 1
3271
3272 for one_comp in comparison_results:
3273 proc = one_comp['process'].base_string()
3274 mvalue = one_comp['value']
3275 brsvalue = one_comp['brs']
3276 if len(proc) + 1 > proc_col_size:
3277 proc_col_size = len(proc) + 1
3278
3279 col_size = 18
3280
3281 pass_proc = 0
3282 fail_proc = 0
3283
3284 failed_proc_list = []
3285 no_check_proc_list = []
3286
3287 res_str = fixed_string_length(process_header, proc_col_size) + \
3288 fixed_string_length("matrix", col_size) + \
3289 fixed_string_length("BRS", col_size) + \
3290 fixed_string_length("ratio", col_size) + \
3291 "Result"
3292
3293 for one_comp in comparison_results:
3294 proc = one_comp['process'].base_string()
3295 mvalue = one_comp['value']
3296 brsvalue = one_comp['brs']
3297 ratio = (abs(brsvalue['m2'])/abs(mvalue['m2']))
3298 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3299 fixed_string_length("%1.10e" % mvalue['m2'], col_size)+ \
3300 fixed_string_length("%1.10e" % brsvalue['m2'], col_size)+ \
3301 fixed_string_length("%1.10e" % ratio, col_size)
3302
3303 if ratio > threshold:
3304 fail_proc += 1
3305 proc_succeed = False
3306 failed_proc_list.append(proc)
3307 res_str += "Failed"
3308 else:
3309 pass_proc += 1
3310 proc_succeed = True
3311 res_str += "Passed"
3312
3313
3314
3315
3316
3317 if len(mvalue['jamp'])!=0:
3318 for k in range(len(mvalue['jamp'][0])):
3319 m_sum = 0
3320 brs_sum = 0
3321
3322 for j in range(len(mvalue['jamp'])):
3323
3324 m_sum += abs(mvalue['jamp'][j][k])**2
3325 brs_sum += abs(brsvalue['jamp'][j][k])**2
3326
3327
3328 if not m_sum:
3329 continue
3330 ratio = abs(brs_sum) / abs(m_sum)
3331
3332 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
3333 fixed_string_length("%1.10e" % m_sum, col_size) + \
3334 fixed_string_length("%1.10e" % brs_sum, col_size) + \
3335 fixed_string_length("%1.10e" % ratio, col_size)
3336
3337 if ratio > 1e-15:
3338 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
3339 fail_proc += 1
3340 pass_proc -= 1
3341 failed_proc_list.append(proc)
3342 res_str += tmp_str + "Failed"
3343 elif not proc_succeed:
3344 res_str += tmp_str + "Passed"
3345
3346
3347 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3348 (pass_proc, pass_proc + fail_proc,
3349 fail_proc, pass_proc + fail_proc)
3350
3351 if fail_proc != 0:
3352 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3353
3354 if output=='text':
3355 return res_str
3356 else:
3357 return fail_proc
3358
3359
3360
3361 def check_lorentz(processes, param_card = None,cuttools="", tir={}, options=None, \
3362 reuse = False, output_path=None, cmd = FakeInterface()):
3363     """ Check if the squared matrix element (summed over helicities) is Lorentz
3364     invariant by boosting the momenta with different values."""
3365
3366 cmass_scheme = cmd.options['complex_mass_scheme']
3367 if isinstance(processes, base_objects.ProcessDefinition):
3368
3369
3370 multiprocess = processes
3371 model = multiprocess.get('model')
3372
3373 if multiprocess.get('perturbation_couplings')==[]:
3374 evaluator = MatrixElementEvaluator(model,
3375 cmd= cmd, auth_skipping = False, reuse = True)
3376 else:
3377 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3378 model=model, auth_skipping = False, reuse = True,
3379 output_path=output_path, cmd = cmd)
3380
3381 if not cmass_scheme and processes.get('perturbation_couplings')==[]:
3382
3383             logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3384 for particle in evaluator.full_model.get('particles'):
3385 if particle.get('width') != 'ZERO':
3386 evaluator.full_model.get('parameter_dict')[\
3387 particle.get('width')] = 0.
3388
3389 results = run_multiprocs_no_crossings(check_lorentz_process,
3390 multiprocess,
3391 evaluator,
3392 options=options)
3393
3394 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3395
3396 clean_up(output_path)
3397
3398 return results
3399
3400 elif isinstance(processes, base_objects.Process):
3401 processes = base_objects.ProcessList([processes])
3402 elif isinstance(processes, base_objects.ProcessList):
3403 pass
3404 else:
3405 raise InvalidCmd("processes is of non-supported format")
3406
3407 assert processes, "No processes given"
3408
3409 model = processes[0].get('model')
3410
3411
3412 if processes[0].get('perturbation_couplings')==[]:
3413 evaluator = MatrixElementEvaluator(model, param_card,
3414 auth_skipping = False, reuse = True,
3415 cmd=cmd)
3416 else:
3417 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
3418 model=model,param_card=param_card,
3419 auth_skipping = False, reuse = True,
3420 output_path=output_path, cmd = cmd)
3421
3422 comparison_results = []
3423
3424
3425 for process in processes:
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435 result = check_lorentz_process(process, evaluator,options=options)
3436 if result:
3437 comparison_results.append(result)
3438
3439 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3440
3441 clean_up(output_path)
3442
3443 return comparison_results
3444
3447     """Check Lorentz invariance for the process, unless it is already done."""
3448
3449 amp_results = []
3450 model = process.get('model')
3451
3452 for i, leg in enumerate(process.get('legs')):
3453 leg.set('number', i+1)
3454
3455 logger.info("Checking lorentz transformations for %s" % \
3456 process.nice_string().replace('Process:', 'process'))
3457
3458 legs = process.get('legs')
3459
3460
3461 try:
3462 if process.get('perturbation_couplings')==[]:
3463 amplitude = diagram_generation.Amplitude(process)
3464 else:
3465 amplitude = loop_diagram_generation.LoopAmplitude(process)
3466 if not amplitude.get('process').get('has_born'):
3467 evaluator.loop_optimized_output = False
3468 except InvalidCmd:
3469 logging.info("No diagrams for %s" % \
3470 process.nice_string().replace('Process', 'process'))
3471 return None
3472
3473 if not amplitude.get('diagrams'):
3474
3475 logging.info("No diagrams for %s" % \
3476 process.nice_string().replace('Process', 'process'))
3477 return None
3478
3479
3480 p, w_rambo = evaluator.get_momenta(process, options)
3481
3482
3483 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3484 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3485 gen_color = True)
3486 else:
3487 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3488 optimized_output = evaluator.loop_optimized_output)
3489
3490 MLOptions = {'ImprovePS':True,'ForceMP':True}
3491 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3492 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3493 auth_skipping = True, options=options)
3494 else:
3495 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3496 auth_skipping = True, PS_name = 'original', MLOptions=MLOptions,
3497 options = options)
3498
3499 if data and data['m2']:
3500 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3501 results = [data]
3502 else:
3503 results = [('Original evaluation',data)]
3504 else:
3505 return {'process':process, 'results':'pass'}
3506
3507
3508
3509
3510 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3511 for boost in range(1,4):
3512 boost_p = boost_momenta(p, boost)
3513 results.append(evaluator.evaluate_matrix_element(matrix_element,
3514 p=boost_p,output='jamp'))
3515 else:
3516
3517 boost_p = boost_momenta(p, 3)
3518 results.append(('Z-axis boost',
3519 evaluator.evaluate_matrix_element(matrix_element, options=options,
3520 p=boost_p, PS_name='zBoost', output='jamp',MLOptions = MLOptions)))
3521
3522
3523
3524
3525 if not options['events']:
3526 boost_p = boost_momenta(p, 1)
3527 results.append(('X-axis boost',
3528 evaluator.evaluate_matrix_element(matrix_element, options=options,
3529 p=boost_p, PS_name='xBoost', output='jamp',MLOptions = MLOptions)))
3530 boost_p = boost_momenta(p, 2)
3531 results.append(('Y-axis boost',
3532 evaluator.evaluate_matrix_element(matrix_element,options=options,
3533 p=boost_p, PS_name='yBoost', output='jamp',MLOptions = MLOptions)))
3534
3535
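    # The two rotations below act only on the spatial components (E and pz are
    # left untouched):
    #   pi/2 about z:  (px, py) -> (-py, px)
    #   pi/4 about z:  (px, py) -> ((px - py)/sqrt(2), (px + py)/sqrt(2))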
3536 rot_p = [[pm[0],-pm[2],pm[1],pm[3]] for pm in p]
3537 results.append(('Z-axis pi/2 rotation',
3538 evaluator.evaluate_matrix_element(matrix_element,options=options,
3539 p=rot_p, PS_name='Rotation1', output='jamp',MLOptions = MLOptions)))
3540
3541 sq2 = math.sqrt(2.0)
3542 rot_p = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in p]
3543 results.append(('Z-axis pi/4 rotation',
3544 evaluator.evaluate_matrix_element(matrix_element,options=options,
3545 p=rot_p, PS_name='Rotation2', output='jamp',MLOptions = MLOptions)))
3546
3547
3548 return {'process': process, 'results': results}
3549
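# A minimal sketch of the kind of transformation boost_momenta() applies (the
# real helper is defined earlier in this module; the fixed velocity 'beta' used
# below is an assumption made purely for illustration):
#
#     import math
#
#     def z_boost(p, beta=0.5):
#         """Boost a list of [E, px, py, pz] four-momenta along the z axis."""
#         gamma = 1.0 / math.sqrt(1.0 - beta**2)
#         return [[gamma*(E + beta*pz), px, py, gamma*(pz + beta*E)]
#                 for E, px, py, pz in p]
#
# check_lorentz_process() requires |M|^2 evaluated on such boosted (and rotated)
# points to agree with the original evaluation within the thresholds applied by
# the output functions further down.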
3550
3551
3552
3553 def check_unitary_feynman(processes_unit, processes_feynm, param_card=None,
3554                           options=None, tir={}, output_path=None,
3555                           cuttools="", reuse=False, cmd = FakeInterface()):
3556     """Check gauge invariance of the processes by comparing matrix elements
3557        evaluated in unitary gauge and in Feynman gauge at the same points.
3558     """
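    # Outline of the check implemented below: first evaluate every process in
    # unitary gauge (aloha.unitary_gauge = True) with the non-optimized loop
    # output, recording the phase-space point used for each process; then
    # re-evaluate in Feynman gauge at the very same points; finally pair the two
    # values per process so that the unitary/Feynman output routine can
    # tabulate them.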
3559
3560 mg_root = cmd._mgme_dir
3561
3562 cmass_scheme = cmd.options['complex_mass_scheme']
3563
3564 if isinstance(processes_unit, base_objects.ProcessDefinition):
3565
3566
3567 multiprocess_unit = processes_unit
3568 model = multiprocess_unit.get('model')
3569
3570
3571
3572 loop_optimized_bu = cmd.options['loop_optimized_output']
3573 cmd.options['loop_optimized_output'] = False
3574 aloha.unitary_gauge = True
3575 if processes_unit.get('perturbation_couplings')==[]:
3576 evaluator = MatrixElementEvaluator(model, param_card,
3577 cmd=cmd,auth_skipping = False, reuse = True)
3578 else:
3579 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3580 cmd=cmd, model=model,
3581 param_card=param_card,
3582 auth_skipping = False,
3583 output_path=output_path,
3584 reuse = False)
3585 if not cmass_scheme and multiprocess_unit.get('perturbation_couplings')==[]:
3586             logger.info('Set all widths to zero for non-complex-mass-scheme checks')
3587 for particle in evaluator.full_model.get('particles'):
3588 if particle.get('width') != 'ZERO':
3589 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3590
3591 output_u = run_multiprocs_no_crossings(get_value,
3592 multiprocess_unit,
3593 evaluator,
3594 options=options)
3595
3596 clean_added_globals(ADDED_GLOBAL)
3597
3598 if processes_unit.get('perturbation_couplings')!=[]:
3599 clean_up(output_path)
3600
3601 momentum = {}
3602 for data in output_u:
3603 momentum[data['process']] = data['p']
3604
3605 multiprocess_feynm = processes_feynm
3606 model = multiprocess_feynm.get('model')
3607
3608
3609 aloha.unitary_gauge = False
3610
3611
3612 cmd.options['loop_optimized_output'] = True
3613 if processes_feynm.get('perturbation_couplings')==[]:
3614 evaluator = MatrixElementEvaluator(model, param_card,
3615 cmd= cmd, auth_skipping = False, reuse = False)
3616 else:
3617 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3618 cmd= cmd, model=model,
3619 param_card=param_card,
3620 auth_skipping = False,
3621 output_path=output_path,
3622 reuse = False)
3623
3624 if not cmass_scheme and multiprocess_feynm.get('perturbation_couplings')==[]:
3625
3626 for particle in evaluator.full_model.get('particles'):
3627 if particle.get('width') != 'ZERO':
3628 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3629
3630 output_f = run_multiprocs_no_crossings(get_value, multiprocess_feynm,
3631 evaluator, momentum,
3632 options=options)
3633 output = [processes_unit]
3634 for data in output_f:
3635 local_dico = {}
3636 local_dico['process'] = data['process']
3637 local_dico['value_feynm'] = data['value']
3638 local_dico['value_unit'] = [d['value'] for d in output_u
3639 if d['process'] == data['process']][0]
3640 output.append(local_dico)
3641
3642 if processes_feynm.get('perturbation_couplings')!=[] and not reuse:
3643
3644 clean_up(output_path)
3645
3646
3647 cmd.options['loop_optimized_output'] = loop_optimized_bu
3648
3649 return output
3650
3651
3652
3653
3654 else:
3655         raise InvalidCmd("processes are in an unsupported format")
3656
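# Hypothetical usage sketch (the ProcessDefinition objects and the 'cmd'
# interface would normally be built by the check command driving this module;
# the variable names are placeholders):
#
#     results = check_unitary_feynman(procdef_unit, procdef_feynm,
#                                     param_card=None, options=options,
#                                     cuttools=cuttools_path, cmd=cmd)
#     print(output_unitary_feynman(results, output='text'))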
3657 def get_value(process, evaluator, p=None, options=None):
3658 """Return the value/momentum for a phase space point"""
3659
3660 for i, leg in enumerate(process.get('legs')):
3661 leg.set('number', i+1)
3662
3663 logger.info("Checking %s in %s gauge" % \
3664 ( process.nice_string().replace('Process:', 'process'),
3665 'unitary' if aloha.unitary_gauge else 'feynman'))
3666
3667 legs = process.get('legs')
3668
3669
3670 try:
3671 if process.get('perturbation_couplings')==[]:
3672 amplitude = diagram_generation.Amplitude(process)
3673 else:
3674 amplitude = loop_diagram_generation.LoopAmplitude(process)
3675 if not amplitude.get('process').get('has_born'):
3676 evaluator.loop_optimized_output = False
3677 except InvalidCmd:
3678 logging.info("No diagrams for %s" % \
3679 process.nice_string().replace('Process', 'process'))
3680 return None
3681
3682 if not amplitude.get('diagrams'):
3683
3684 logging.info("No diagrams for %s" % \
3685 process.nice_string().replace('Process', 'process'))
3686 return None
3687
3688 if not p:
3689
3690 p, w_rambo = evaluator.get_momenta(process, options)
3691
3692
3693 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3694 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3695 gen_color = True)
3696 else:
3697 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3698 gen_color = True, optimized_output = evaluator.loop_optimized_output)
3699
3700 mvalue = evaluator.evaluate_matrix_element(matrix_element, p=p,
3701 output='jamp',options=options)
3702
3703 if mvalue and mvalue['m2']:
3704 return {'process':process.base_string(),'value':mvalue,'p':p}
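    # If the evaluation produced no usable |M|^2, the function falls through and
    # implicitly returns None, which callers treat as "no result" for this process.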
3705 
3706 def output_lorentz_inv_loop(comparison_results, output='text'):
3707     """Present the results of a comparison in a nice list format for loop
3708     processes. It details the results of each Lorentz transformation performed.
3709     """
3710
3711 process = comparison_results[0]['process']
3712 results = comparison_results[0]['results']
3713
3714
3715 threshold_rotations = 1e-6
3716
3717
3718
3719 threshold_boosts = 1e-3
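    # The boost tolerance is looser than the rotation one, presumably because
    # boosts mix the (large) energy and longitudinal momentum components and so
    # amplify the numerical noise of the loop evaluation more than a pure
    # rotation of the transverse components does.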
3720 res_str = "%s" % process.base_string()
3721
3722 transfo_col_size = 17
3723 col_size = 18
3724 transfo_name_header = 'Transformation name'
3725
3726 if len(transfo_name_header) + 1 > transfo_col_size:
3727 transfo_col_size = len(transfo_name_header) + 1
3728
3729 for transfo_name, value in results:
3730 if len(transfo_name) + 1 > transfo_col_size:
3731 transfo_col_size = len(transfo_name) + 1
3732
3733 res_str += '\n' + fixed_string_length(transfo_name_header, transfo_col_size) + \
3734 fixed_string_length("Value", col_size) + \
3735 fixed_string_length("Relative diff.", col_size) + "Result"
3736
3737 ref_value = results[0]
3738 res_str += '\n' + fixed_string_length(ref_value[0], transfo_col_size) + \
3739 fixed_string_length("%1.10e" % ref_value[1]['m2'], col_size)
3740
3741
3742 all_pass = True
3743 for res in results[1:]:
3744 threshold = threshold_boosts if 'BOOST' in res[0].upper() else \
3745 threshold_rotations
3746 rel_diff = abs((ref_value[1]['m2']-res[1]['m2'])\
3747 /((ref_value[1]['m2']+res[1]['m2'])/2.0))
3748 this_pass = rel_diff <= threshold
3749 if not this_pass:
3750 all_pass = False
3751 res_str += '\n' + fixed_string_length(res[0], transfo_col_size) + \
3752 fixed_string_length("%1.10e" % res[1]['m2'], col_size) + \
3753 fixed_string_length("%1.10e" % rel_diff, col_size) + \
3754 ("Passed" if this_pass else "Failed")
3755 if all_pass:
3756 res_str += '\n' + 'Summary: passed'
3757 else:
3758 res_str += '\n' + 'Summary: failed'
3759
3760 return res_str
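# Worked example of the pass criterion used above: for a z-axis boost with
#   m2_ref   = 1.0000000e+00
#   m2_boost = 1.0000004e+00
# the symmetric relative difference is
#   |m2_ref - m2_boost| / ((m2_ref + m2_boost)/2) ~= 4.0e-07,
# well below the 1e-3 boost threshold, so that line would be reported as "Passed".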
3761 
3762 def output_lorentz_inv(comparison_results, output='text'):
3763     """Present the results of a comparison in a nice list format.
3764     If output='fail', return the number of failed processes (for tests).
3765     """
3766
3767
3768 if comparison_results[0]['process']['perturbation_couplings']!=[]:
3769 return output_lorentz_inv_loop(comparison_results, output)
3770
3771 proc_col_size = 17
3772
3773 threshold=1e-10
3774 process_header = "Process"
3775
3776 if len(process_header) + 1 > proc_col_size:
3777 proc_col_size = len(process_header) + 1
3778
3779     for one_comp in comparison_results:
3780         proc = one_comp['process'].base_string()
3781         proc_col_size = max(proc_col_size, len(proc) + 1)
3782
3783 col_size = 18
3784
3785 pass_proc = 0
3786 fail_proc = 0
3787 no_check_proc = 0
3788
3789 failed_proc_list = []
3790 no_check_proc_list = []
3791
3792 res_str = fixed_string_length(process_header, proc_col_size) + \
3793 fixed_string_length("Min element", col_size) + \
3794 fixed_string_length("Max element", col_size) + \
3795 fixed_string_length("Relative diff.", col_size) + \
3796 "Result"
3797
3798 for one_comp in comparison_results:
3799 proc = one_comp['process'].base_string()
3800 data = one_comp['results']
3801
3802 if data == 'pass':
3803 no_check_proc += 1
3804 no_check_proc_list.append(proc)
3805 continue
3806
3807 values = [data[i]['m2'] for i in range(len(data))]
3808
3809 min_val = min(values)
3810 max_val = max(values)
3811 diff = (max_val - min_val) / abs(max_val)
3812
3813 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3814 fixed_string_length("%1.10e" % min_val, col_size) + \
3815 fixed_string_length("%1.10e" % max_val, col_size) + \
3816 fixed_string_length("%1.10e" % diff, col_size)
3817
3818 if diff < threshold:
3819 pass_proc += 1
3820 proc_succeed = True
3821 res_str += "Passed"
3822 else:
3823 fail_proc += 1
3824 proc_succeed = False
3825 failed_proc_list.append(proc)
3826 res_str += "Failed"
3827
3828
3829
3830
3831
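        # Colour-flow (JAMP) consistency: for each colour-flow index k, sum
        # |jamp[j][k]|^2 over the remaining index j for every evaluation and
        # require the relative spread of these sums to stay below 1e-10; a
        # mismatch fails the process even when the summed |M|^2 agreed.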
3832 if len(data[0]['jamp'])!=0:
3833 for k in range(len(data[0]['jamp'][0])):
3834 sum = [0] * len(data)
3835
3836 for j in range(len(data[0]['jamp'])):
3837
3838 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
3839 sum = [sum[i] + values[i] for i in range(len(values))]
3840
3841
3842 min_val = min(sum)
3843 max_val = max(sum)
3844 if not max_val:
3845 continue
3846 diff = (max_val - min_val) / max_val
3847
3848 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
3849 fixed_string_length("%1.10e" % min_val, col_size) + \
3850 fixed_string_length("%1.10e" % max_val, col_size) + \
3851 fixed_string_length("%1.10e" % diff, col_size)
3852
3853 if diff > 1e-10:
3854 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
3855 fail_proc += 1
3856 pass_proc -= 1
3857 failed_proc_list.append(proc)
3858 res_str += tmp_str + "Failed"
3859 elif not proc_succeed:
3860 res_str += tmp_str + "Passed"
3861
3862
3863
3864 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3865 (pass_proc, pass_proc + fail_proc,
3866 fail_proc, pass_proc + fail_proc)
3867
3868 if fail_proc != 0:
3869 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3870 if no_check_proc:
3871 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
3872
3873 if output == 'text':
3874 return res_str
3875 else:
3876 return fail_proc
3877 
3878 def output_unitary_feynman(comparison_results, output='text'):
3879     """Present the results of a comparison in a nice list format.
3880     If output='fail', return the number of failed processes (for tests).
3881     """
3882
3883 proc_col_size = 17
3884
3885
3886
3887 pert_coupl = comparison_results[0]['perturbation_couplings']
3888 comparison_results = comparison_results[1:]
3889
3890 if pert_coupl:
3891 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
3892 else:
3893 process_header = "Process"
3894
3895 if len(process_header) + 1 > proc_col_size:
3896 proc_col_size = len(process_header) + 1
3897
3898 for data in comparison_results:
3899 proc = data['process']
3900 if len(proc) + 1 > proc_col_size:
3901 proc_col_size = len(proc) + 1
3902
3903 pass_proc = 0
3904 fail_proc = 0
3905 no_check_proc = 0
3906
3907 failed_proc_list = []
3908 no_check_proc_list = []
3909
3910 col_size = 18
3911
3912 res_str = fixed_string_length(process_header, proc_col_size) + \
3913 fixed_string_length("Unitary", col_size) + \
3914 fixed_string_length("Feynman", col_size) + \
3915 fixed_string_length("Relative diff.", col_size) + \
3916 "Result"
3917
3918 for one_comp in comparison_results:
3919 proc = one_comp['process']
3920 data = [one_comp['value_unit'], one_comp['value_feynm']]
3921
3922
3923 if data[0] == 'pass':
3924 no_check_proc += 1
3925 no_check_proc_list.append(proc)
3926 continue
3927
3928 values = [data[i]['m2'] for i in range(len(data))]
3929
3930 min_val = min(values)
3931 max_val = max(values)
3932 diff = (max_val - min_val) / max_val
3933
3934 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3935 fixed_string_length("%1.10e" % values[0], col_size) + \
3936 fixed_string_length("%1.10e" % values[1], col_size) + \
3937 fixed_string_length("%1.10e" % diff, col_size)
3938
3939 if diff < 1e-8:
3940 pass_proc += 1
3941 proc_succeed = True
3942 res_str += "Passed"
3943 else:
3944 fail_proc += 1
3945 proc_succeed = False
3946 failed_proc_list.append(proc)
3947 res_str += "Failed"
3948
3949
3950
3951
3952
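        # Same colour-flow (JAMP) consistency test as in the Lorentz-invariance
        # output above, here applied between the unitary-gauge and Feynman-gauge
        # evaluations.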
3953 if len(data[0]['jamp'])>0:
3954 for k in range(len(data[0]['jamp'][0])):
3955 sum = [0, 0]
3956
3957 for j in range(len(data[0]['jamp'])):
3958
3959 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
3960 sum = [sum[i] + values[i] for i in range(len(values))]
3961
3962
3963 min_val = min(sum)
3964 max_val = max(sum)
3965 if not max_val:
3966 continue
3967 diff = (max_val - min_val) / max_val
3968
3969 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , col_size) + \
3970 fixed_string_length("%1.10e" % sum[0], col_size) + \
3971 fixed_string_length("%1.10e" % sum[1], col_size) + \
3972 fixed_string_length("%1.10e" % diff, col_size)
3973
3974 if diff > 1e-10:
3975 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
3976 fail_proc += 1
3977 pass_proc -= 1
3978 failed_proc_list.append(proc)
3979 res_str += tmp_str + "Failed"
3980 elif not proc_succeed:
3981 res_str += tmp_str + "Passed"
3982
3983
3984
3985 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3986 (pass_proc, pass_proc + fail_proc,
3987 fail_proc, pass_proc + fail_proc)
3988
3989 if fail_proc != 0:
3990 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3991 if no_check_proc:
3992 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
3993
3994
3995 if output == 'text':
3996 return res_str
3997 else:
3998 return fail_proc
3999