15 """Methods and classes to export matrix elements to v4 format."""
16
17 from __future__ import absolute_import
18 import copy
19 import fractions
20 import glob
21 import logging
22 import os
23 import stat
24 import sys
25 import re
26 import shutil
27 import subprocess
28 import itertools
29 import time
30 import datetime
31
32
33 import aloha
34
35 import madgraph.core.base_objects as base_objects
36 import madgraph.core.color_algebra as color
37 import madgraph.core.helas_objects as helas_objects
38 import madgraph.loop.loop_helas_objects as loop_helas_objects
39 import madgraph.iolibs.drawing_eps as draw
40 import madgraph.iolibs.files as files
41 import madgraph.iolibs.group_subprocs as group_subprocs
42 import madgraph.various.banner as banner_mod
43 import madgraph.various.misc as misc
44 import madgraph.various.q_polynomial as q_polynomial
45 import madgraph.iolibs.file_writers as writers
46 import madgraph.iolibs.gen_infohtml as gen_infohtml
47 import madgraph.iolibs.template_files as template_files
48 import madgraph.iolibs.ufo_expression_parsers as parsers
49 import madgraph.iolibs.export_v4 as export_v4
50 import madgraph.various.diagram_symmetry as diagram_symmetry
51 import madgraph.various.process_checks as process_checks
52 import madgraph.various.progressbar as pbar
53 import madgraph.various.q_polynomial as q_polynomial
54 import madgraph.core.color_amp as color_amp
55 import madgraph.iolibs.helas_call_writers as helas_call_writers
56 import models.check_param_card as check_param_card
57 from madgraph.loop.loop_base_objects import LoopDiagram
58 from madgraph.loop.MadLoopBannerStyles import MadLoopBannerStyles
59 from six.moves import range
60 from six.moves import zip
61
62
63
64 pjoin = os.path.join
65
66 import aloha.create_aloha as create_aloha
67 import models.write_param_card as param_writer
68 from madgraph import MadGraph5Error, MG5DIR, InvalidCmd
69 from madgraph.iolibs.files import cp, ln, mv
70 pjoin = os.path.join
71 _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/'
72 logger = logging.getLogger('madgraph.loop_exporter')
73
74
75
76
78 """ Class to define general helper functions to the different
79 loop fortran exporters (ME, SA, MEGroup, etc..) which will inherit both
80 from this class AND from the corresponding ProcessExporterFortran(ME,SA,...).
81 It plays the same role as ProcessExporterFrotran and simply defines here
82 loop-specific helpers functions necessary for all loop exporters.
83 Notice that we do not have LoopExporterFortran inheriting from
84 ProcessExporterFortran but give access to arguments like dir_path and
85 clean using options. This avoids method resolution object ambiguity"""
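# Illustrative note (not part of the original module): the inheritance pattern
# described above means a concrete loop exporter is declared along the lines of
#
#     class SomeLoopExporterSA(LoopExporterFortran,
#                              export_v4.ProcessExporterFortranSA):
#         pass
#
# so that the loop-specific helpers defined here sit next to the standard
# standalone machinery, while LoopExporterFortran itself stays outside the
# ProcessExporterFortran hierarchy and receives dir_path/clean via options.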
86
87 default_opt = dict(export_v4.ProcessExporterFortran.default_opt)
88 default_opt.update({'clean': False, 'complex_mass':False,
89 'export_format':'madloop', 'mp':True,
90 'loop_dir':'', 'cuttools_dir':'',
91 'fortran_compiler':'gfortran',
92 'SubProc_prefix': 'P',
93 'output_dependencies': 'external',
94 'compute_color_flows': False,
95 'mode':''})
96
97 include_names = {'ninja' : 'mninja.mod',
98 'golem' : 'generic_function_1p.mod',
99 'samurai':'msamurai.mod',
100 'collier': 'collier.mod'}
101
def __init__(self, dir_path="", opt=None):
"""Initialize the LoopExporterFortran with directory information on where
to find all the loop-related source files, like CutTools."""
105
106
107 self.opt = dict(self.default_opt)
108 if opt:
109 self.opt.update(opt)
110
111 self.SubProc_prefix = self.opt['SubProc_prefix']
112 self.loop_dir = self.opt['loop_dir']
113 self.cuttools_dir = self.opt['cuttools_dir']
114 self.fortran_compiler = self.opt['fortran_compiler']
115 self.dependencies = self.opt['output_dependencies']
116 self.compute_color_flows = self.opt['compute_color_flows']
117
118 super(LoopExporterFortran,self).__init__(dir_path, self.opt)
119
120
def get_aloha_model(self, model):
# NOTE: signature reconstructed from the call site in
# write_loop_matrix_element_v4; the original def line is not shown in this listing.
""" Caches the aloha model created here as an attribute of the loop
exporter so that it can later be used in the LoopHelasMatrixElement
in the function compute_all_analytic_information, recycling aloha
computations across different LoopHelasMatrixElements steered by the
same loop exporter.
"""
202 if not hasattr(self, 'aloha_model'):
203 self.aloha_model = create_aloha.AbstractALOHAModel(model.get('modelpath'))
204 return self.aloha_model
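# Minimal usage sketch of the caching above (illustrative only, assuming a
# model object exposing get('modelpath')): repeated calls return the very same
# AbstractALOHAModel instance, so analytic information computed for one
# LoopHelasMatrixElement can be recycled by the next one.
#
#     aloha_model_first  = exporter.get_aloha_model(model)
#     aloha_model_second = exporter.get_aloha_model(model)
#     assert aloha_model_first is aloha_model_second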
205
206
207
208
220
221
222
223
class LoopProcessExporterFortranSA(LoopExporterFortran,
                                   export_v4.ProcessExporterFortranSA):
# NOTE: class statement reconstructed from the inheritance pattern described in
# LoopExporterFortran and the super() calls below; the original header line is
# not shown in this listing.
"""Class to take care of exporting a set of loop matrix elements in the
Fortran format."""
229
230 template_dir=os.path.join(_file_path,'iolibs/template_files/loop')
231 madloop_makefile_name = 'makefile'
232
233 MadLoop_banner = MadLoopBannerStyles.get_MadLoop_Banner(
234 style='classic2', color='green',
235 top_frame_char = '=', bottom_frame_char = '=',
236 left_frame_char = '{',right_frame_char = '}',
237 print_frame=True, side_margin = 7, up_margin = 1)
238
243
250
def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
"""Create the global information for loops."""
253
254 super(LoopProcessExporterFortranSA,self).finalize(matrix_element,
255 cmdhistory, MG5options, outputflag)
256
257
258 MLCard = banner_mod.MadLoopParam(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat'))
259
260
261 if self.has_loop_induced:
262 MLCard['MLReductionLib'] = "7|6|1"
263
264
265
266
267
268 MLCard['COLLIERComputeUVpoles'] = False
269 MLCard['COLLIERComputeIRpoles'] = False
270
271 MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams_default.dat'))
272 MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat'))
273
276
278 """ Write the general check_sa.py in SubProcesses that calls all processes successively."""
279
280
281 file = open(os.path.join(self.template_dir,\
282 'check_sa_all.py.inc')).read()
283 open(output_path,'w').writelines(file)
284
285 os.chmod(output_path, os.stat(output_path).st_mode | stat.S_IEXEC)
286
287
289 """write a function to call the correct matrix element"""
290
291 template = """
292 %(python_information)s
293
294 SUBROUTINE INITIALISE(PATH)
295 C ROUTINE FOR F2PY to read the benchmark point.
296 IMPLICIT NONE
297 CHARACTER*512 PATH
298 CF2PY INTENT(IN) :: PATH
CALL SETPARA(PATH)  ! first call to set up the parameters
300 RETURN
301 END
302
303 subroutine CHANGE_PARA(name, value)
304 implicit none
305 CF2PY intent(in) :: name
306 CF2PY intent(in) :: value
307
308 character*512 name
309 double precision value
310
311 include '../Source/MODEL/input.inc'
312 include '../Source/MODEL/coupl.inc'
313 include '../Source/MODEL/mp_coupl.inc'
314 include '../Source/MODEL/mp_input.inc'
315
316 SELECT CASE (name)
317 %(parameter_setup)s
318 CASE DEFAULT
319 write(*,*) 'no parameter matching', name
320 END SELECT
321
322 return
323 end
324
325 subroutine update_all_coup()
326 implicit none
327 call coup()
328 call printout()
329 return
330 end
331
332
333 SUBROUTINE SET_MADLOOP_PATH(PATH)
334 C Routine to set the path of the folder 'MadLoop5_resources' to MadLoop
335 CHARACTER(512) PATH
336 CF2PY intent(in)::path
337 CALL SETMADLOOPPATH(PATH)
338 END
339
340 subroutine smatrixhel(pdgs, procid, npdg, p, ALPHAS, SCALES2, nhel, ANS, RETURNCODE)
341 IMPLICIT NONE
342
343 CF2PY double precision, intent(in), dimension(0:3,npdg) :: p
344 CF2PY integer, intent(in), dimension(npdg) :: pdgs
345 CF2PY integer, intent(in):: procid
346 CF2PY integer, intent(in) :: npdg
347 CF2PY double precision, intent(out) :: ANS
348 CF2PY integer, intent(out) :: RETURNCODE
349 CF2PY double precision, intent(in) :: ALPHAS
350 CF2PY double precision, intent(in) :: SCALES2
351
352 integer pdgs(*)
353 integer npdg, nhel, RETURNCODE, procid
354 double precision p(*)
355 double precision ANS, ALPHAS, PI,SCALES2
356 1 continue
357 %(smatrixhel)s
358
359 return
360 end
361
362 subroutine get_pdg_order(OUT, ALLPROC)
363 IMPLICIT NONE
364 CF2PY INTEGER, intent(out) :: OUT(%(nb_me)i,%(maxpart)i)
365 CF2PY INTEGER, intent(out) :: ALLPROC(%(nb_me)i)
366 INTEGER OUT(%(nb_me)i,%(maxpart)i), PDGS(%(nb_me)i,%(maxpart)i)
367 INTEGER ALLPROC(%(nb_me)i),PIDs(%(nb_me)i)
368 DATA PDGS/ %(pdgs)s /
369 DATA PIDS/ %(pids)s /
370 OUT=PDGS
371 ALLPROC = PIDS
372 RETURN
373 END
374
375 subroutine get_prefix(PREFIX)
376 IMPLICIT NONE
377 CF2PY CHARACTER*20, intent(out) :: PREFIX(%(nb_me)i)
378 character*20 PREFIX(%(nb_me)i),PREF(%(nb_me)i)
379 DATA PREF / '%(prefix)s'/
380 PREFIX = PREF
381 RETURN
382 END
383
384 """
385
386 allids = list(self.prefix_info.keys())
387 allprefix = [self.prefix_info[key][0] for key in allids]
388 min_nexternal = min([len(ids[0]) for ids in allids])
389 max_nexternal = max([len(ids[0]) for ids in allids])
390
391 info = []
392 for (key,pid), (prefix, tag) in self.prefix_info.items():
393 info.append('#PY %s : %s # %s %s' % (tag, key, prefix, pid))
394
395
396 text = []
397 for n_ext in range(min_nexternal, max_nexternal+1):
398 current_id = [ids[0] for ids in allids if len(ids[0])==n_ext]
399 current_pid = [ids[1] for ids in allids if len(ids[0])==n_ext]
400 if not current_id:
401 continue
402 if min_nexternal != max_nexternal:
403 if n_ext == min_nexternal:
404 text.append(' if (npdg.eq.%i)then' % n_ext)
405 else:
406 text.append(' else if (npdg.eq.%i)then' % n_ext)
407 for ii,pdgs in enumerate(current_id):
408 pid = current_pid[ii]
409 condition = '.and.'.join(['%i.eq.pdgs(%i)' %(pdg, i+1) for i, pdg in enumerate(pdgs)])
410 if ii==0:
411 text.append( ' if(%s.and.(procid.le.0.or.procid.eq.%d)) then ! %i' % (condition, pid, len(pdgs)))
412 else:
413 text.append( ' else if(%s.and.(procid.le.0.or.procid.eq.%d)) then ! %i' % (condition,pid,len(pdgs)))
414 text.append(' call %sget_me(p, ALPHAS, DSQRT(SCALES2), NHEL, ANS, RETURNCODE)' % self.prefix_info[(pdgs,pid)][0])
415 text.append( ' else if(procid.gt.0) then !')
416 text.append( ' procid = -1' )
417 text.append( ' goto 1' )
418
419 text.append(' endif')
420
421 if min_nexternal != max_nexternal:
422 text.append('endif')
423
424 params = self.get_model_parameter(self.model)
425 parameter_setup =[]
426 for key, var in params.items():
427 parameter_setup.append(' CASE ("%s")\n %s = value\n MP__%s = value'
428 % (key, var, var))
429
430
431
432 formatting = {'python_information':'\n'.join(info),
433 'smatrixhel': '\n'.join(text),
434 'maxpart': max_nexternal,
435 'nb_me': len(allids),
436 'pdgs': ','.join([str(pdg[i]) if i<len(pdg) else '0'
437 for i in range(max_nexternal) \
438 for (pdg,pid) in allids]),
439 'prefix':'\',\''.join(allprefix),
440 'parameter_setup': '\n'.join(parameter_setup),
441 'pids': ','.join(str(pid) for (pdg,pid) in allids),
442 }
443
444
445 text = template % formatting
446 fsock = writers.FortranWriter(pjoin(self.dir_path, 'SubProcesses', 'all_matrix.f'),'w')
447 fsock.writelines(text)
448 fsock.close()
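# Illustrative helper (not part of the original module) mirroring how the
# Fortran dispatch condition above is assembled for one PDG assignment.
def _example_pdg_condition(pdgs):
    """e.g. (21, 21, 6, -6) ->
    '21.eq.pdgs(1).and.21.eq.pdgs(2).and.6.eq.pdgs(3).and.-6.eq.pdgs(4)'"""
    return '.and.'.join('%i.eq.pdgs(%i)' % (pdg, i + 1)
                        for i, pdg in enumerate(pdgs))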
449
450
451
453 """ Perform additional actions specific for this class when setting
454 up the template with the copy_template function."""
455
456
457 cpfiles= ["Cards/MadLoopParams.dat",
458 "SubProcesses/MadLoopParamReader.f",
459 "SubProcesses/MadLoopParams.inc"]
460 if copy_Source_makefile:
461 cpfiles.append("Source/makefile")
462
463 for file in cpfiles:
464 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file),
465 os.path.join(self.dir_path, file))
466
467 cp(pjoin(self.loop_dir,'StandAlone/Cards/MadLoopParams.dat'),
468 pjoin(self.dir_path, 'Cards/MadLoopParams_default.dat'))
469
470 ln(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), pjoin(self.dir_path,'SubProcesses'))
471
472
473 shutil.copy(pjoin(self.loop_dir,'StandAlone','SubProcesses','makefile'),
474 pjoin(self.dir_path, 'SubProcesses',self.madloop_makefile_name))
475
476
477
478 link_tir_libs=[]
479 tir_libs=[]
480
481 filePath = pjoin(self.dir_path, 'SubProcesses',
482 'MadLoop_makefile_definitions')
483 calls = self.write_loop_makefile_definitions(
484 writers.MakefileWriter(filePath),link_tir_libs,tir_libs)
485
486
487
488
489 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone',
490 "SubProcesses","MadLoopCommons.inc")).read()
491 writer = writers.FortranWriter(os.path.join(self.dir_path,
492 "SubProcesses","MadLoopCommons.f"))
493 writer.writelines(MadLoopCommon%{
494 'print_banner_commands':self.MadLoop_banner}, context={
495 'collier_available':False})
496 writer.close()
497
498
499 if not os.path.exists(pjoin(self.dir_path,'SubProcesses',
500 'MadLoop5_resources')):
501 cp(pjoin(self.loop_dir,'StandAlone','SubProcesses',
502 'MadLoop5_resources'),pjoin(self.dir_path,'SubProcesses'))
503
504
505 ln(pjoin(self.dir_path,'SubProcesses','MadLoopParams.dat'),
506 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources'))
507 ln(pjoin(self.dir_path,'Cards','param_card.dat'),
508 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources'))
509 ln(pjoin(self.dir_path,'Cards','ident_card.dat'),
510 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources'))
511
512
513
514 if os.path.isfile(pjoin(self.dir_path,'SubProcesses','check_sa.f')):
515 os.remove(pjoin(self.dir_path,'SubProcesses','check_sa.f'))
516
517 cwd = os.getcwd()
518 dirpath = os.path.join(self.dir_path, 'SubProcesses')
519 try:
520 os.chdir(dirpath)
521 except os.error:
522 logger.error('Could not cd to directory %s' % dirpath)
523 return 0
524
525
526 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\
527 writers.FortranWriter('cts_mpc.h'))
528
529
530 os.chdir(cwd)
531
532
533 super(LoopProcessExporterFortranSA, self).link_CutTools(self.dir_path)
534
535
536
539 """ Create the file makefile which links to the TIR libraries."""
540
541 file = open(os.path.join(self.loop_dir,'StandAlone',
542 'SubProcesses','MadLoop_makefile_definitions.inc')).read()
543 replace_dict={}
544 replace_dict['link_tir_libs']=' '.join(link_tir_libs)
545 replace_dict['tir_libs']=' '.join(tir_libs)
546 replace_dict['dotf']='%.f'
547 replace_dict['prefix']= self.SubProc_prefix
548 replace_dict['doto']='%.o'
549 replace_dict['tir_include']=' '.join(tir_include)
550 file=file%replace_dict
551 if writer:
552 writer.writelines(file)
553 else:
554 return file
555
def convert_model(self, model, wanted_lorentz=[],
                  wanted_couplings=[]):
564
def get_ME_identifier(self, matrix_element,
                      group_number=None, group_elem_number=None):
""" A function returning a string uniquely identifying the matrix
element given in argument, so that it can be used as a prefix for all
MadLoop5 subroutines and common blocks related to it. This makes it
possible to compile several processes into one library, as requested by
the BLHA (Binoth Les Houches Accord) guidelines.
The arguments group_number and group_elem_number are only used for the
LoopInduced output with MadEvent."""
574
575
576
577
578 if (not group_number is None) and group_elem_number is None:
579 return 'ML5_%d_%s_'%(matrix_element.get('processes')[0].get('id'),
580 group_number)
581 elif group_number is None or group_elem_number is None:
582 return 'ML5_%d_'%matrix_element.get('processes')[0].get('id')
583 else:
584 return 'ML5_%d_%s_%s_'%(matrix_element.get('processes')[0].get('id'),
585 group_number, group_elem_number)
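# Illustrative examples of the prefixes produced above, assuming a process
# with id 1:
#     get_ME_identifier(me)                                      -> 'ML5_1_'
#     get_ME_identifier(me, group_number=2)                      -> 'ML5_1_2_'
#     get_ME_identifier(me, group_number=2, group_elem_number=3) -> 'ML5_1_2_3_'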
586
589 """Returns the name of the SubProcess directory, which can contain
590 the process goup and group element number for the case of loop-induced
591 integration with MadEvent."""
592
593
594
595
596 if not group_number is None and group_elem_number is None:
597 return "%s%d_%s_%s"%(self.SubProc_prefix, process.get('id'),
598 group_number,process.shell_string(print_id=False))
599 elif group_number is None or group_elem_number is None:
600 return "%s%s" %(self.SubProc_prefix,process.shell_string())
601 else:
602 return "%s%d_%s_%s_%s"%(self.SubProc_prefix, process.get('id'),
603 group_number, group_elem_number,process.shell_string(print_id=False))
604
605
606
607
609 """ Different daughter classes might want different compilers.
610 Here, the gfortran compiler is used throughout the compilation
611 (mandatory for CutTools written in f90) """
612 if isinstance(compiler, str):
613 fortran_compiler = compiler
614 compiler = export_v4.default_compiler
615 compiler['fortran'] = fortran_compiler
616
617 if not compiler['fortran'] is None and not \
618 any([name in compiler['fortran'] for name in \
619 ['gfortran','ifort']]):
logger.info('For loop processes, the compiler must be fortran90 '+\
'compatible, like gfortran.')
622 compiler['fortran'] = 'gfortran'
623 self.set_compiler(compiler,True)
624 else:
625 self.set_compiler(compiler)
626
627 self.set_cpp_compiler(compiler['cpp'])
628
630
631
632
633
634
635
def turn_to_mp_calls(self, helas_calls_list):
# NOTE: signature reconstructed from the call sites in write_loop_num and
# write_born_amps_and_wfs; the original def line and its docstring are not
# shown in this listing.
MP=re.compile(r"(?P<toSub>^.*CALL\s+)",re.IGNORECASE | re.MULTILINE)
638
639 def replaceWith(match_obj):
640 return match_obj.group('toSub')+'MP_'
641
642 DCMPLX=re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)",\
643 re.IGNORECASE | re.MULTILINE)
644
645 for i, helas_call in enumerate(helas_calls_list):
646 new_helas_call=MP.sub(replaceWith,helas_call)
647 helas_calls_list[i]=DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",\
648 new_helas_call)
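# Illustrative before/after of the rewriting performed above (hypothetical
# HELAS call, for demonstration only):
#     CALL FFV1_0(W(1,1),W(1,2),W(1,3),DCMPLX(GC_11),AMP(1))
# becomes
#     CALL MP_FFV1_0(W(1,1),W(1,2),W(1,3),CMPLX(GC_11,KIND=16),AMP(1))
# i.e. every CALL is redirected to its MP_ quadruple-precision variant and
# DCMPLX(...) casts are promoted to CMPLX(...,KIND=16).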
649
651 """ In the loop output, we don't need the files from the Source folder """
652 pass
653
655 """ Add the linking of the additional model files for multiple precision
656 """
657 super(LoopProcessExporterFortranSA, self).make_model_symbolic_link()
658 model_path = self.dir_path + '/Source/MODEL/'
659 ln(model_path + '/mp_coupl.inc', self.dir_path + '/SubProcesses')
660 ln(model_path + '/mp_coupl_same_name.inc', self.dir_path + '/SubProcesses')
661
663 """ Compiles the additional dependences for loop (such as CutTools)."""
664 super(LoopProcessExporterFortranSA, self).make()
665
666
667 libdir = os.path.join(self.dir_path,'lib')
668 sourcedir = os.path.join(self.dir_path,'Source')
669 if self.dependencies=='internal':
670 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
671 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
672 if os.path.exists(pjoin(sourcedir,'CutTools')):
673 logger.info('Compiling CutTools (can take a couple of minutes) ...')
674 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1)
675 logger.info(' ...done.')
676 else:
677 raise MadGraph5Error('Could not compile CutTools because its'+\
678 ' source directory could not be found in the SOURCE folder.')
679 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
680 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
681 raise MadGraph5Error('CutTools compilation failed.')
682
683
684
685 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin(
686 libdir, 'libcts.a')))),'compiler_version.log')
687 if os.path.exists(compiler_log_path):
688 compiler_version_used = open(compiler_log_path,'r').read()
689 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\
690 pjoin(sourcedir,'make_opts')))) in compiler_version_used:
691 if os.path.exists(pjoin(sourcedir,'CutTools')):
692 logger.info('CutTools was compiled with a different fortran'+\
693 ' compiler. Re-compiling it now...')
694 misc.compile(['cleanCT'], cwd = sourcedir)
695 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1)
696 logger.info(' ...done.')
697 else:
698 raise MadGraph5Error("CutTools installation in %s"\
699 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\
700 " seems to have been compiled with a different compiler than"+\
701 " the one specified in MG5_aMC. Please recompile CutTools.")
702
def cat_coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3):
"""Combine the coefficient information, reducing it to
(fraction, is_imaginary)."""
706
707 total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power
708
709 return (total_coeff, is_imaginary)
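# Worked example (illustrative): with ff_number=2, frac=Fraction(1,3),
# Nc_power=-1 and the default Nc_value=3,
#     2 * 1/3 * 3**(-1) = Fraction(2, 9)
# so cat_coeff(2, fractions.Fraction(1, 3), True, -1) == (Fraction(2, 9), True).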
710
712 """ Returns a list with element 'i' being a list of tuples corresponding
713 to all apparition of amplitude number 'i' in the jamp number 'j'
714 with coeff 'coeff_j'. The format of each tuple describing an apparition
715 is (j, coeff_j). where coeff_j is of the form (Fraction, is_imag)."""
716
717 if(isinstance(col_amps,list)):
718 if(col_amps and isinstance(col_amps[0],list)):
719 color_amplitudes=col_amps
720 else:
721 raise MadGraph5Error("Incorrect col_amps argument passed to get_amp_to_jamp_map")
722 else:
723 raise MadGraph5Error("Incorrect col_amps argument passed to get_amp_to_jamp_map")
724
725
726 res_list = [[] for i in range(n_amps)]
727 for i, coeff_list in enumerate(color_amplitudes):
728 for (coefficient, amp_number) in coeff_list:
729 res_list[amp_number-1].append((i,self.cat_coeff(\
730 coefficient[0],coefficient[1],coefficient[2],coefficient[3])))
731
732 return res_list
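# Illustrative sketch of the mapping above, for two jamps and n_amps=2, with
# coefficients given as (ff_number, frac, is_imaginary, Nc_power):
#     col_amps = [[((1, Fraction(1, 3), False, 0), 1)],
#                 [((1, Fraction(1, 2), False, 1), 1),
#                  ((-1, Fraction(1, 1), True, 0), 2)]]
# The result is indexed by amplitude number minus one:
#     res[0] = [(0, (Fraction(1, 3), False)), (1, (Fraction(3, 2), False))]
#     res[1] = [(1, (Fraction(-1, 1), True))]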
733
735 """Return the color matrix definition lines. This color matrix is of size
736 NLOOPAMPSxNBORNAMPS and allows for squaring individually each Loop and Born
737 amplitude."""
738
739 logger.info('Computing diagram color coefficients')
740
741
742
743
744
745 ampl_to_jampl=self.get_amp_to_jamp_map(\
746 matrix_element.get_loop_color_amplitudes(),
747 matrix_element.get_number_of_loop_amplitudes())
748 if matrix_element.get('processes')[0].get('has_born'):
749 ampb_to_jampb=self.get_amp_to_jamp_map(\
750 matrix_element.get_born_color_amplitudes(),
751 matrix_element.get_number_of_born_amplitudes())
752 else:
753 ampb_to_jampb=ampl_to_jampl
754
755 if matrix_element.get('color_matrix'):
756 ColorMatrixDenom = \
757 matrix_element.get('color_matrix').get_line_denominators()
758 ColorMatrixNum = [ matrix_element.get('color_matrix').\
759 get_line_numerators(index, denominator) for
760 (index, denominator) in enumerate(ColorMatrixDenom) ]
761 else:
762 ColorMatrixDenom= [1]
763 ColorMatrixNum = [[1]]
764
765
766 ColorMatrixNumOutput=[]
767 ColorMatrixDenomOutput=[]
768
769
770
771 start = time.time()
772 progress_bar = None
773 time_info = False
774 for i, jampl_list in enumerate(ampl_to_jampl):
775
776
777
778 if i==5:
779 elapsed_time = time.time()-start
780 t = len(ampl_to_jampl)*(elapsed_time/5.0)
781 if t > 10.0:
782 time_info = True
logger.info('The color factors computation will take'+\
' about %s to run. '%str(datetime.timedelta(seconds=int(t)))+\
785 'Started on %s.'%datetime.datetime.now().strftime(\
786 "%d-%m-%Y %H:%M"))
787 if logger.getEffectiveLevel()<logging.WARNING:
788 widgets = ['Color computation:', pbar.Percentage(), ' ',
789 pbar.Bar(),' ', pbar.ETA(), ' ']
790 progress_bar = pbar.ProgressBar(widgets=widgets,
791 maxval=len(ampl_to_jampl), fd=sys.stdout)
792
793 if not progress_bar is None:
794 progress_bar.update(i+1)
795
796 sys.stdout.flush()
797
798 line_num=[]
799 line_denom=[]
800
801
802
803
804
805
806
807
808 if len(jampl_list)==0:
809 line_num=[0]*len(ampb_to_jampb)
810 line_denom=[1]*len(ampb_to_jampb)
811 ColorMatrixNumOutput.append(line_num)
812 ColorMatrixDenomOutput.append(line_denom)
813 continue
814
815 for jampb_list in ampb_to_jampb:
816 real_num=0
817 imag_num=0
818 common_denom=color_amp.ColorMatrix.lcmm(*[abs(ColorMatrixDenom[jampl]*
819 ampl_coeff[0].denominator*ampb_coeff[0].denominator) for
820 ((jampl, ampl_coeff),(jampb,ampb_coeff)) in
821 itertools.product(jampl_list,jampb_list)])
822 for ((jampl, ampl_coeff),(jampb, ampb_coeff)) in \
823 itertools.product(jampl_list,jampb_list):
824
825
826 buff_num=ampl_coeff[0].numerator*\
827 ampb_coeff[0].numerator*ColorMatrixNum[jampl][jampb]*\
828 abs(common_denom)/(ampl_coeff[0].denominator*\
829 ampb_coeff[0].denominator*ColorMatrixDenom[jampl])
830
831
832
833 if ampl_coeff[1] and ampb_coeff[1]:
834 real_num=real_num+buff_num
835 elif not ampl_coeff[1] and not ampb_coeff[1]:
836 real_num=real_num+buff_num
837 elif not ampl_coeff[1] and ampb_coeff[1]:
838 imag_num=imag_num-buff_num
839 else:
840 imag_num=imag_num+buff_num
841 assert not (real_num!=0 and imag_num!=0), "MadGraph5_aMC@NLO found a "+\
842 "color matrix element which has both a real and imaginary part."
843 if imag_num!=0:
844 assert int(imag_num) == imag_num and int(common_denom) == common_denom
845 res=fractions.Fraction(int(imag_num),int(common_denom))
846 line_num.append(res.numerator)
847
848
849 line_denom.append(res.denominator*-1)
850 else:
851 assert int(real_num) == real_num and int(common_denom) == common_denom
852 res=fractions.Fraction(int(real_num),int(common_denom))
853 line_num.append(res.numerator)
854
855 line_denom.append(res.denominator)
856
857 ColorMatrixNumOutput.append(line_num)
858 ColorMatrixDenomOutput.append(line_denom)
859
860 if time_info:
861 logger.info('Finished on %s.'%datetime.datetime.now().strftime(\
862 "%d-%m-%Y %H:%M"))
863 if progress_bar!=None:
864 progress_bar.finish()
865
866 return (ColorMatrixNumOutput,ColorMatrixDenomOutput)
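# Worked example (illustrative) of the per-pair combination above: for a single
# (loop, born) pair with ampl_coeff=(Fraction(1,3), False),
# ampb_coeff=(Fraction(1,2), False), ColorMatrixNum[jampl][jampb]=7 and
# ColorMatrixDenom[jampl]=6, the common denominator is 36 and
#     buff_num = 1*1*7*36 / (3*2*6) = 7
# so the stored entry is Fraction(7, 36), i.e. (1/3)*(1/2)*(7/6). A purely
# imaginary entry would instead be flagged by negating the stored denominator.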
867
def get_context(self, matrix_element):
""" Returns the contextual variables which need to be set when
pre-processing the template files."""
871
872
873
874
875
876
877 try:
878 n_squared_split_orders = matrix_element.rep_dict['nSquaredSO']
879 except (KeyError, AttributeError):
880 n_squared_split_orders = 1
881
882 LoopInduced = not matrix_element.get('processes')[0].get('has_born')
883 self.has_loop_induced = max(LoopInduced, self.has_loop_induced)
884
885 ComputeColorFlows = self.compute_color_flows or LoopInduced
886
887
888 AmplitudeReduction = LoopInduced or ComputeColorFlows
889
890
891 TIRCaching = AmplitudeReduction or n_squared_split_orders>1
892 MadEventOutput = False
893 return {'LoopInduced': LoopInduced,
894 'ComputeColorFlows': ComputeColorFlows,
895 'AmplitudeReduction': AmplitudeReduction,
896 'TIRCaching': TIRCaching,
897 'MadEventOutput': MadEventOutput}
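# Summary of the flag logic above (illustrative): loop-induced processes force
# the color-flow computation, which in turn forces amplitude-level reduction;
# TIR caching is enabled by amplitude reduction or by more than one squared
# split-order combination.
#     LoopInduced  compute_color_flows | ComputeColorFlows AmplitudeReduction TIRCaching
#     True         any                 | True              True               True
#     False        True                | True              True               True
#     False        False               | False             False              (nSquaredSO > 1)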
898
899
900
901
902
def generate_loop_subprocess(self, matrix_element, fortran_model,
group_number=None, proc_id=None, config_map=None, unique_id=None):
"""Generate the Pxxxxx directory for a loop subprocess in MG4 standalone,
including the necessary loop_matrix.f, born_matrix.f and include files.
Notice that this differs too much from generate_subprocess_directory
for there to be any point in reusing that mother function.
The 'group_number' and 'proc_id' options are only used for the LoopInduced
MadEvent output, and only to specify the ME_identifier and the P*
SubProcess directory name."""
912
913 cwd = os.getcwd()
914 proc_dir_name = self.get_SubProc_folder_name(
915 matrix_element.get('processes')[0],group_number,proc_id)
916 dirpath = os.path.join(self.dir_path, 'SubProcesses', proc_dir_name)
917
918 try:
919 os.mkdir(dirpath)
920 except os.error as error:
921 logger.warning(error.strerror + " " + dirpath)
922
923 try:
924 os.chdir(dirpath)
925 except os.error:
926 logger.error('Could not cd to directory %s' % dirpath)
927 return 0
928
929 logger.info('Creating files in directory %s' % dirpath)
930
if unique_id is None:
raise MadGraph5Error('A unique id must be provided to the function '+\
'generate_loop_subprocess of LoopProcessExporterFortranSA.')
934
935 open('unique_id.inc','w').write(
936 """ integer UNIQUE_ID
937 parameter(UNIQUE_ID=%d)"""%unique_id)
938
939
940 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
941
942 calls=self.write_loop_matrix_element_v4(None,matrix_element,
943 fortran_model, group_number = group_number,
944 proc_id = proc_id, config_map = config_map)
945
946
947
948
949
950 if matrix_element.get('processes')[0].get('has_born'):
951 filename = 'born_matrix.f'
952 calls = self.write_bornmatrix(
953 writers.FortranWriter(filename),
954 matrix_element,
955 fortran_model)
956
957 filename = 'pmass.inc'
958 self.write_pmass_file(writers.FortranWriter(filename),
959 matrix_element)
960
961 filename = 'ngraphs.inc'
962 self.write_ngraphs_file(writers.FortranWriter(filename),
963 len(matrix_element.get_all_amplitudes()))
964
965
966
967 loop_diags = [loop_diag for loop_diag in\
968 matrix_element.get('base_amplitude').get('loop_diagrams')\
969 if isinstance(loop_diag,LoopDiagram) and loop_diag.get('type') > 0]
if len(loop_diags)>5000:
logger.info("There are more than 5000 loop diagrams. "+\
"Only the first 5000 are drawn.")
973 filename = "loop_matrix.ps"
974 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList(
975 loop_diags[:5000]),filename,
976 model=matrix_element.get('processes')[0].get('model'),amplitude='')
977 logger.info("Drawing loop Feynman diagrams for " + \
978 matrix_element.get('processes')[0].nice_string())
979 plot.draw()
980
981 if matrix_element.get('processes')[0].get('has_born'):
982 filename = "born_matrix.ps"
983 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\
984 get('born_diagrams'),
985 filename,
986 model=matrix_element.get('processes')[0].\
987 get('model'),
988 amplitude='')
989 logger.info("Generating born Feynman diagrams for " + \
990 matrix_element.get('processes')[0].nice_string(\
991 print_weighted=False))
992 plot.draw()
993
994 self.link_files_from_Subprocesses(self.get_SubProc_folder_name(
995 matrix_element.get('processes')[0],group_number,proc_id))
996
997
998 os.chdir(cwd)
999
1000 if not calls:
1001 calls = 0
1002 return calls
1003
1005 """ To link required files from the Subprocesses directory to the
1006 different P* ones"""
1007
1008 linkfiles = ['coupl.inc',
1009 'cts_mprec.h', 'cts_mpc.h', 'mp_coupl.inc',
1010 'mp_coupl_same_name.inc',
1011 'MadLoopParamReader.f','MadLoopCommons.f',
1012 'MadLoopParams.inc','global_specs.inc']
1013
1014 for file in linkfiles:
1015 ln('../%s' % file)
1016
1017 ln('../%s'%self.madloop_makefile_name, name='makefile')
1018
1019
1020 ln('../../lib/mpmodule.mod')
1021
1022
1023 ln('../MadLoop5_resources')
1024
1027 """Generates the entries for the general replacement dictionary used
1028 for the different output codes for this exporter.The arguments
1029 group_number and proc_id are just for the LoopInduced output with MadEvent."""
1030
1031 dict={}
1032
1033
1034
1035
1036 dict['proc_prefix'] = self.get_ME_identifier(matrix_element,
1037 group_number = group_number, group_elem_number = proc_id)
1038
1039 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']:
1040 for proc in matrix_element.get('processes'):
1041 ids = [l.get('id') for l in proc.get('legs_with_decays')]
1042 self.prefix_info[tuple(ids),proc.get('id')] = [dict['proc_prefix'], proc.get_tag()]
1043
1044
1045
1046 dict['proc_id'] = ''
1047
1048 info_lines = self.get_mg5_info_lines()
1049 dict['info_lines'] = info_lines
1050
1051 process_lines = self.get_process_info_lines(matrix_element)
1052 dict['process_lines'] = process_lines
1053
1054 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
1055 dict['nexternal'] = nexternal
1056 dict['nincoming'] = ninitial
1057
1058 ncomb = matrix_element.get_helicity_combinations()
1059 dict['ncomb'] = ncomb
1060
1061 nloopamps = matrix_element.get_number_of_loop_amplitudes()
1062 dict['nloopamps'] = nloopamps
1063
1064 nloopdiags = len(matrix_element.get('diagrams'))
1065 dict['nloopdiags'] = nloopdiags
1066
1067 nctamps = matrix_element.get_number_of_CT_amplitudes()
1068 dict['nctamps'] = nctamps
1069
1070 nwavefuncs = matrix_element.get_number_of_external_wavefunctions()
1071 dict['nwavefuncs'] = nwavefuncs
1072
1073 dict['real_dp_format']='real*8'
1074 dict['real_mp_format']='real*16'
1075
1076 dict['complex_dp_format']='complex*16'
1077 dict['complex_mp_format']='complex*32'
1078
1079 dict['mass_dp_format'] = dict['complex_dp_format']
1080 dict['mass_mp_format'] = dict['complex_mp_format']
1081
1082
1083 dict['nmultichannels'] = 0
1084 dict['nmultichannel_configs'] = 0
1085 dict['config_map_definition'] = ''
1086 dict['config_index_map_definition'] = ''
1087
1088
1089
1090
1091
1092 if matrix_element.get('processes')[0].get('has_born'):
1093 dict['color_matrix_size'] = 'nbornamps'
1094 dict['get_nsqso_born']=\
1095 "include 'nsqso_born.inc'"
1096 else:
1097 dict['get_nsqso_born']="""INTEGER NSQSO_BORN
1098 PARAMETER (NSQSO_BORN=0)
1099 """
1100 dict['color_matrix_size'] = 'nloopamps'
1101
1102
1103
1104
1105 if matrix_element.get('processes')[0].get('has_born'):
1106
1107 nbornamps = matrix_element.get_number_of_born_amplitudes()
1108 dict['nbornamps'] = nbornamps
1109 dict['ncomb_helas_objs'] = ',ncomb'
1110 dict['nbornamps_decl'] = \
1111 """INTEGER NBORNAMPS
1112 PARAMETER (NBORNAMPS=%d)"""%nbornamps
1113 dict['nBornAmps'] = nbornamps
1114
1115 else:
1116 dict['ncomb_helas_objs'] = ''
1117 dict['dp_born_amps_decl'] = ''
1118 dict['dp_born_amps_decl_in_mp'] = ''
1119 dict['copy_mp_to_dp_born_amps'] = ''
1120 dict['mp_born_amps_decl'] = ''
1121 dict['nbornamps_decl'] = ''
1122 dict['nbornamps'] = 0
1123 dict['nBornAmps'] = 0
1124
1125 return dict
1126
def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model,
group_number=None, proc_id=None, config_map=None):
""" Writes loop_matrix.f, CT_interface.f, loop_num.f and
mp_born_amps_and_wfs.f.
The arguments group_number and proc_id are only used for the LoopInduced
output with MadEvent, and only in get_ME_identifier.
"""
1134
1135
1136
if config_map:
raise MadGraph5Error('The default loop output cannot be used with '+\
'MadEvent and cannot compute the AMP2 for multi-channeling.')
1140
1141 if not isinstance(fortran_model,\
1142 helas_call_writers.FortranUFOHelasCallWriter):
1143 raise MadGraph5Error('The loop fortran output can only'+\
1144 ' work with a UFO Fortran model')
1145
1146 LoopFortranModel = helas_call_writers.FortranUFOHelasCallWriter(
1147 argument=fortran_model.get('model'),
1148 hel_sum=matrix_element.get('processes')[0].get('has_born'))
1149
1150
1151
1152
1153
1154 matrix_element.compute_all_analytic_information(
1155 self.get_aloha_model(matrix_element.get('processes')[0].get('model')))
1156
1157
1158
1159 matrix_element.rep_dict = self.generate_general_replace_dict(
1160 matrix_element, group_number = group_number, proc_id = proc_id)
1161
1162
1163 matrix_element.rep_dict['maxlcouplings']= \
1164 matrix_element.find_max_loop_coupling()
1165
1166
1167 if matrix_element.get('processes')[0].get('has_born'):
1168 matrix_element.rep_dict['dp_born_amps_decl_in_mp'] = \
1169 matrix_element.rep_dict['complex_dp_format']+" DPAMP(NBORNAMPS,NCOMB)"+\
1170 "\n common/%sAMPS/DPAMP"%matrix_element.rep_dict['proc_prefix']
1171 matrix_element.rep_dict['dp_born_amps_decl'] = \
1172 matrix_element.rep_dict['complex_dp_format']+" AMP(NBORNAMPS,NCOMB)"+\
1173 "\n common/%sAMPS/AMP"%matrix_element.rep_dict['proc_prefix']
1174 matrix_element.rep_dict['mp_born_amps_decl'] = \
1175 matrix_element.rep_dict['complex_mp_format']+" AMP(NBORNAMPS,NCOMB)"+\
1176 "\n common/%sMP_AMPS/AMP"%matrix_element.rep_dict['proc_prefix']
1177 matrix_element.rep_dict['copy_mp_to_dp_born_amps'] = \
1178 '\n'.join(['DO I=1,NBORNAMPS','DPAMP(I,H)=AMP(I,H)','ENDDO'])
1179
1180 if writer:
1181 raise MadGraph5Error('Matrix output mode no longer supported.')
1182
1183 filename = 'loop_matrix.f'
1184 calls = self.write_loopmatrix(writers.FortranWriter(filename),
1185 matrix_element,
1186 LoopFortranModel)
1187
1188
1189 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w')
1190 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix'])
1191 proc_prefix_writer.close()
1192
1193 filename = 'check_sa.f'
1194 self.write_check_sa(writers.FortranWriter(filename),matrix_element)
1195
1196 filename = 'CT_interface.f'
1197 self.write_CT_interface(writers.FortranWriter(filename),\
1198 matrix_element)
1199
1200
1201
1202 filename = 'improve_ps.f'
1203 calls = self.write_improve_ps(writers.FortranWriter(filename),
1204 matrix_element)
1205
1206 filename = 'loop_num.f'
1207 self.write_loop_num(writers.FortranWriter(filename),\
1208 matrix_element,LoopFortranModel)
1209
1210 filename = 'mp_born_amps_and_wfs.f'
1211 self.write_born_amps_and_wfs(writers.FortranWriter(filename),\
1212 matrix_element,LoopFortranModel)
1213
1214
1215 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
1216 filename = 'nexternal.inc'
1217 self.write_nexternal_file(writers.FortranWriter(filename),
1218 nexternal, ninitial)
1219
1220 filename = 'process_info.inc'
1221 self.write_process_info_file(writers.FortranWriter(filename),
1222 matrix_element)
1223 return calls
1224
1226 """A small structural function to write the include file specifying some
1227 process characteristics."""
1228
1229 model = matrix_element.get('processes')[0].get('model')
1230 process_info = {}
1231
1232
1233
1234
1235
1236 process_info['max_spin_connected_to_loop']=\
1237 matrix_element.get_max_spin_connected_to_loop()
1238
1239 process_info['max_spin_external_particle']= max(
1240 model.get_particle(l.get('id')).get('spin') for l in
1241 matrix_element.get('processes')[0].get('legs'))
1242
1243 proc_include = \
1244 """
1245 INTEGER MAX_SPIN_CONNECTED_TO_LOOP
1246 PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d)
1247 INTEGER MAX_SPIN_EXTERNAL_PARTICLE
1248 PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d)
1249 """%process_info
1250
1251 writer.writelines(proc_include)
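# Illustrative output (hypothetical process with an external gluon and only
# fermions/vectors attached to the loop, spins in the 2S+1 convention): the
# generated process_info.inc would read
#     INTEGER MAX_SPIN_CONNECTED_TO_LOOP
#     PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=3)
#     INTEGER MAX_SPIN_EXTERNAL_PARTICLE
#     PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=3)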
1252
1254 """ To overload the default name for this function such that the correct
1255 function is used when called from the command interface """
1256
1257 self.unique_id +=1
1258 return self.generate_loop_subprocess(matrix_element,fortran_model,
1259 unique_id=self.unique_id)
1260
1262 """Writes out the steering code check_sa. In the optimized output mode,
1263 All the necessary entries in the replace_dictionary have already been
1264 set in write_loopmatrix because it is only there that one has access to
1265 the information about split orders."""
1266 replace_dict = copy.copy(matrix_element.rep_dict)
1267 for key in ['print_so_born_results','print_so_loop_results',
1268 'write_so_born_results','write_so_loop_results','set_coupling_target']:
1269 if key not in list(replace_dict.keys()):
1270 replace_dict[key]=''
1271
1272 if matrix_element.get('processes')[0].get('has_born'):
1273 file = open(os.path.join(self.template_dir,'check_sa.inc')).read()
1274 else:
1275 file = open(os.path.join(self.template_dir,\
1276 'check_sa_loop_induced.inc')).read()
1277 file=file%replace_dict
1278 writer.writelines(file)
1279
1280
1281 if not os.path.isfile(pjoin(self.template_dir,'check_py.f.inc')):
1282 return
1283
1284 file = open(os.path.join(self.template_dir,\
1285 'check_py.f.inc')).read()
1286
1287 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']:
1288 replace_dict['prefix_routine'] = replace_dict['proc_prefix']
1289 else:
1290 replace_dict['prefix_routine'] = ''
1291 file=file%replace_dict
1292 new_path = writer.name.replace('check_sa.f', 'f2py_wrapper.f')
1293 new_writer = writer.__class__(new_path, 'w')
1294 new_writer.writelines(file)
1295
1296 file = open(os.path.join(self.template_dir,\
1297 'check_sa.py.inc')).read()
1298
1299
1300 curr_proc = matrix_element.get('processes')[0]
1301 random_PSpoint_python_formatted = \
1302 """# Specify your chosen PS point below. If you leave it filled with None, then the script will attempt to read it from the file PS.input.
1303 p= [[None,]*4]*%d"""%len(curr_proc.get('legs'))
1304
1305 process_definition_string = curr_proc.nice_string().replace('Process:','')
1306 file=file.format(random_PSpoint_python_formatted,process_definition_string,
1307 replace_dict['proc_prefix'].lower())
1308 new_path = writer.name.replace('check_sa.f', 'check_sa.py')
1309 new_writer = open(new_path, 'w')
1310 new_writer.writelines(file)
1311
1312 os.chmod(new_path, os.stat(new_path).st_mode | stat.S_IEXEC)
1313
1315 """ Write out the improve_ps subroutines which modify the PS point
1316 given in input and slightly deform it to achieve exact onshellness on
1317 all external particles as well as perfect energy-momentum conservation"""
1318 replace_dict = copy.copy(matrix_element.rep_dict)
1319
1320 (nexternal,ninitial)=matrix_element.get_nexternal_ninitial()
1321 replace_dict['ninitial']=ninitial
1322 mass_list=matrix_element.get_external_masses()[:-2]
1323 mp_variable_prefix = check_param_card.ParamCard.mp_prefix
1324
1325
1326 replace_dict['real_format']=replace_dict['real_mp_format']
1327 replace_dict['mp_prefix']='MP_'
1328 replace_dict['exp_letter']='e'
1329 replace_dict['mp_specifier']='_16'
1330 replace_dict['coupl_inc_name']='mp_coupl.inc'
1331 replace_dict['masses_def']='\n'.join(['MASSES(%(i)d)=%(prefix)s%(m)s'\
1332 %{'i':i+1,'m':m, 'prefix':mp_variable_prefix} for \
1333 i, m in enumerate(mass_list)])
1334 file_mp = open(os.path.join(self.template_dir,'improve_ps.inc')).read()
1335 file_mp=file_mp%replace_dict
1336
1337 writer.writelines(file_mp)
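# Note (illustrative, not the deformation algorithm itself): the conditions the
# generated improve_ps routine enforces are exact on-shellness of every
# external leg, E_i**2 = |p_i|**2 + m_i**2, together with exact energy-momentum
# conservation between the initial and final state; the template above is
# instantiated here in quadruple precision, using the MP_ parameter prefix and
# mp_coupl.inc.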
1338
1340 """ Create the file containing the core subroutine called by CutTools
1341 which contains the Helas calls building the loop"""
1342
1343 if not matrix_element.get('processes') or \
1344 not matrix_element.get('diagrams'):
1345 return 0
1346
1347
1348 writers.FortranWriter.downcase = False
1349
1350 file = open(os.path.join(self.template_dir,'loop_num.inc')).read()
1351
1352 replace_dict = copy.copy(matrix_element.rep_dict)
1353
1354 loop_helas_calls=fortran_model.get_loop_amplitude_helas_calls(matrix_element)
1355 replace_dict['maxlcouplings']=matrix_element.find_max_loop_coupling()
1356 replace_dict['loop_helas_calls'] = "\n".join(loop_helas_calls)
1357
1358
1359
1360 dp_squaring_lines=['DO I=1,NBORNAMPS',
1361 'CFTOT=DCMPLX(CF_N(AMPLNUM,I)/DBLE(ABS(CF_D(AMPLNUM,I))),0.0d0)',
1362 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1',
1363 'RES=RES+CFTOT*BUFF*DCONJG(AMP(I,H))','ENDDO']
1364 mp_squaring_lines=['DO I=1,NBORNAMPS',
1365 'CFTOT=CMPLX(CF_N(AMPLNUM,I)/(1.0E0_16*ABS(CF_D(AMPLNUM,I))),0.0E0_16,KIND=16)',
1366 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1',
1367 'QPRES=QPRES+CFTOT*BUFF*CONJG(AMP(I,H))','ENDDO']
1368 if matrix_element.get('processes')[0].get('has_born'):
1369 replace_dict['dp_squaring']='\n'.join(dp_squaring_lines)
1370 replace_dict['mp_squaring']='\n'.join(mp_squaring_lines)
1371 else:
1372 replace_dict['dp_squaring']='RES=BUFF'
1373 replace_dict['mp_squaring']='QPRES=BUFF'
1374
1375
1376 self.turn_to_mp_calls(loop_helas_calls)
1377 replace_dict['mp_loop_helas_calls'] = "\n".join(loop_helas_calls)
1378
1379 file=file%replace_dict
1380
1381 if writer:
1382 writer.writelines(file)
1383 else:
1384 return file
1385
1387 """ Create the file CT_interface.f which contains the subroutine defining
1388 the loop HELAS-like calls along with the general interfacing subroutine.
1389 It is used to interface against any OPP tool, including Samurai and Ninja."""
1390
1391 files=[]
1392
1393
1394 replace_dict=copy.copy(matrix_element.rep_dict)
1395
1396
1397
1398 if matrix_element.get('processes')[0].get('has_born'):
1399 replace_dict['finalize_CT']='\n'.join([\
1400 'RES(%d)=NORMALIZATION*2.0d0*DBLE(RES(%d))'%(i,i) for i in range(1,4)])
1401 else:
1402 replace_dict['finalize_CT']='\n'.join([\
1403 'RES(%d)=NORMALIZATION*RES(%d)'%(i,i) for i in range(1,4)])
1404
1405 file = open(os.path.join(self.template_dir,'CT_interface.inc')).read()
1406
1407 file = file % replace_dict
1408 files.append(file)
1409
1410
1411
1412 HelasLoopAmpsCallKeys=matrix_element.get_used_helas_loop_amps()
1413
1414 for callkey in HelasLoopAmpsCallKeys:
1415 replace_dict=copy.copy(matrix_element.rep_dict)
1416
1417
1418 if matrix_element.get('processes')[0].get('has_born'):
1419 replace_dict['validh_or_nothing']=',validh'
1420 else:
1421 replace_dict['validh_or_nothing']=''
1422
1423
1424 if len(callkey)>2:
1425 replace_dict['ncplsargs']=callkey[2]
1426 cplsargs="".join(["C%d,MP_C%d, "%(i,i) for i in range(1,callkey[2]+1)])
1427 replace_dict['cplsargs']=cplsargs
1428 cplsdecl="".join(["C%d, "%i for i in range(1,callkey[2]+1)])[:-2]
1429 replace_dict['cplsdecl']=cplsdecl
1430 mp_cplsdecl="".join(["MP_C%d, "%i for i in range(1,callkey[2]+1)])[:-2]
1431 replace_dict['mp_cplsdecl']=mp_cplsdecl
1432 cplset="\n".join(["\n".join(["LC(%d)=C%d"%(i,i),\
1433 "MP_LC(%d)=MP_C%d"%(i,i)])\
1434 for i in range(1,callkey[2]+1)])
1435 replace_dict['cplset']=cplset
1436
1437 replace_dict['nloopline']=callkey[0]
1438 wfsargs="".join(["W%d, "%i for i in range(1,callkey[1]+1)])
1439 replace_dict['wfsargs']=wfsargs
1440
1441 if not optimized_output:
1442 margs="".join(["M%d,MP_M%d, "%(i,i) for i in range(1,callkey[0]+1)])
1443 else:
1444 margs="".join(["M%d, "%i for i in range(1,callkey[0]+1)])
1445 replace_dict['margs']=margs
1446 wfsargsdecl="".join([("W%d, "%i) for i in range(1,callkey[1]+1)])[:-2]
1447 replace_dict['wfsargsdecl']=wfsargsdecl
1448 margsdecl="".join(["M%d, "%i for i in range(1,callkey[0]+1)])[:-2]
1449 replace_dict['margsdecl']=margsdecl
1450 mp_margsdecl="".join(["MP_M%d, "%i for i in range(1,callkey[0]+1)])[:-2]
1451 replace_dict['mp_margsdecl']=mp_margsdecl
1452 weset="\n".join([("WE("+str(i)+")=W"+str(i)) for \
1453 i in range(1,callkey[1]+1)])
1454 replace_dict['weset']=weset
1455 weset="\n".join([("WE(%d)=W%d"%(i,i)) for i in range(1,callkey[1]+1)])
1456 replace_dict['weset']=weset
1457 msetlines=["M2L(1)=M%d**2"%(callkey[0]),]
1458 mset="\n".join(msetlines+["M2L(%d)=M%d**2"%(i,i-1) for \
1459 i in range(2,callkey[0]+1)])
1460 replace_dict['mset']=mset
1461 mset2lines=["ML(1)=M%d"%(callkey[0]),"ML(2)=M%d"%(callkey[0]),
1462 "MP_ML(1)=MP_M%d"%(callkey[0]),"MP_ML(2)=MP_M%d"%(callkey[0])]
1463 mset2="\n".join(mset2lines+["\n".join(["ML(%d)=M%d"%(i,i-2),
1464 "MP_ML(%d)=MP_M%d"%(i,i-2)]) for \
1465 i in range(3,callkey[0]+3)])
1466 replace_dict['mset2']=mset2
1467 replace_dict['nwfsargs'] = callkey[1]
1468 if callkey[0]==callkey[1]:
1469 replace_dict['nwfsargs_header'] = ""
1470 replace_dict['pairingargs']=""
1471 replace_dict['pairingdecl']=""
1472 pairingset="""DO I=1,NLOOPLINE
1473 PAIRING(I)=1
1474 ENDDO
1475 """
1476 replace_dict['pairingset']=pairingset
1477 else:
1478 replace_dict['nwfsargs_header'] = '_%d'%callkey[1]
1479 pairingargs="".join([("P"+str(i)+", ") for i in \
1480 range(1,callkey[0]+1)])
1481 replace_dict['pairingargs']=pairingargs
1482 pairingdecl="integer "+"".join([("P"+str(i)+", ") for i in \
1483 range(1,callkey[0]+1)])[:-2]
1484 replace_dict['pairingdecl']=pairingdecl
1485 pairingset="\n".join([("PAIRING("+str(i)+")=P"+str(i)) for \
1486 i in range(1,callkey[0]+1)])
1487 replace_dict['pairingset']=pairingset
1488
1489 file = open(os.path.join(self.template_dir,\
1490 'helas_loop_amplitude.inc')).read()
1491 file = file % replace_dict
1492 files.append(file)
1493
1494 file="\n".join(files)
1495
1496 if writer:
1497 writer.writelines(file,context=self.get_context(matrix_element))
1498 else:
1499 return file
1500
1501
1502
def split_HELASCALLS(self, writer, replace_dict, template_name, masterfile,
helas_calls, entry_name, bunch_name, n_helas=2000,
required_so_broadcaster='LOOP_REQ_SO_DONE',
continue_label=1000, momenta_array_name='P',
context={}):
""" Finish the code generation with splitting.
Split the helas calls given in the argument helas_calls into bunches of
size n_helas and place them in dedicated subroutines named
<bunch_name>_i. Also set up the corresponding calls to these subroutines
in the replace_dict dictionary under the entry entry_name.
The context specified is forwarded to the fileWriter."""
1514 helascalls_replace_dict=copy.copy(replace_dict)
1515 helascalls_replace_dict['bunch_name']=bunch_name
1516 helascalls_files=[]
1517 for i, k in enumerate(range(0, len(helas_calls), n_helas)):
1518 helascalls_replace_dict['bunch_number']=i+1
1519 helascalls_replace_dict['helas_calls']=\
1520 '\n'.join(helas_calls[k:k + n_helas])
1521 helascalls_replace_dict['required_so_broadcaster']=\
1522 required_so_broadcaster
1523 helascalls_replace_dict['continue_label']=continue_label
1524 new_helascalls_file = open(os.path.join(self.template_dir,\
1525 template_name)).read()
1526 new_helascalls_file = new_helascalls_file % helascalls_replace_dict
1527 helascalls_files.append(new_helascalls_file)
1528
1529 helascalls_calls = [ "CALL %s%s_%d(%s,NHEL,H,IC)"%\
1530 (replace_dict['proc_prefix'] ,bunch_name,a+1,momenta_array_name) \
1531 for a in range(len(helascalls_files))]
1532 replace_dict[entry_name]='\n'.join(helascalls_calls)
1533 if writer:
1534 for i, helascalls_file in enumerate(helascalls_files):
1535 filename = '%s_%d.f'%(bunch_name,i+1)
1536 writers.FortranWriter(filename).writelines(helascalls_file,
1537 context=context)
1538 else:
1539 masterfile='\n'.join([masterfile,]+helascalls_files)
1540
1541 return masterfile
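# Illustrative helper (not part of the original module) mirroring the bunching
# loop above: a long list of HELAS calls is cut into chunks of at most n_helas
# entries, one chunk per generated <bunch_name>_<i> subroutine.
def _example_bunch_calls(helas_calls, n_helas=2000):
    return ['\n'.join(helas_calls[k:k + n_helas])
            for k in range(0, len(helas_calls), n_helas)]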
1542
def write_loopmatrix(self, writer, matrix_element, fortran_model,
noSplit=False):
"""Create the loop_matrix.f file."""
1546
1547 if not matrix_element.get('processes') or \
1548 not matrix_element.get('diagrams'):
1549 return 0
1550
1551
1552
1553 writers.FortranWriter.downcase = False
1554
1555 replace_dict = copy.copy(matrix_element.rep_dict)
1556
1557
1558
1559 den_factor_line = self.get_den_factor_line(matrix_element)
1560 replace_dict['den_factor_line'] = den_factor_line
1561
1562
1563 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
1564 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
1565 matrix_element.get_beams_hel_avg_factor()
1566
1567
1568
1569
1570 if not matrix_element.get('processes')[0].get('has_born'):
1571 replace_dict['compute_born']=\
1572 """C There is of course no born for loop induced processes
1573 ANS(0)=0.0d0
1574 """
1575 replace_dict['set_reference']='\n'.join([
1576 'C For loop-induced, the reference for comparison is set later'+\
1577 ' from the total contribution of the previous PS point considered.',
1578 'C But you can edit here the value to be used for the first PS point.',
1579 'if (NPSPOINTS.eq.0) then','ref=1.0d-50','else',
1580 'ref=nextRef/DBLE(NPSPOINTS)','endif'])
1581 replace_dict['loop_induced_setup'] = '\n'.join([
1582 'HELPICKED_BU=HELPICKED','HELPICKED=H','MP_DONE=.FALSE.',
1583 'IF(SKIPLOOPEVAL) THEN','GOTO 1227','ENDIF'])
1584 replace_dict['loop_induced_finalize'] = \
1585 ("""DO I=NCTAMPS+1,NLOOPAMPS
1586 IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN
1587 WRITE(*,*) '##W03 WARNING Contribution ',I
1588 WRITE(*,*) ' is unstable for helicity ',H
1589 ENDIF
1590 C IF(.NOT.%(proc_prefix)sISZERO(ABS(AMPL(2,I))+ABS(AMPL(3,I)),REF,-1,H)) THEN
1591 C WRITE(*,*) '##W04 WARNING Contribution ',I,' for helicity ',H,' has a contribution to the poles.'
1592 C WRITE(*,*) 'Finite contribution = ',AMPL(1,I)
1593 C WRITE(*,*) 'single pole contribution = ',AMPL(2,I)
1594 C WRITE(*,*) 'double pole contribution = ',AMPL(3,I)
1595 C ENDIF
1596 ENDDO
1597 1227 CONTINUE
1598 HELPICKED=HELPICKED_BU""")%replace_dict
1599 replace_dict['loop_helas_calls']=""
1600 replace_dict['nctamps_or_nloopamps']='nloopamps'
1601 replace_dict['nbornamps_or_nloopamps']='nloopamps'
1602 replace_dict['squaring']=\
1603 """ANS(1)=ANS(1)+DBLE(CFTOT*AMPL(1,I)*DCONJG(AMPL(1,J)))
1604 IF (J.EQ.1) THEN
1605 ANS(2)=ANS(2)+DBLE(CFTOT*AMPL(2,I))+DIMAG(CFTOT*AMPL(2,I))
1606 ANS(3)=ANS(3)+DBLE(CFTOT*AMPL(3,I))+DIMAG(CFTOT*AMPL(3,I))
1607 ENDIF"""
1608 else:
1609 replace_dict['compute_born']=\
1610 """C Compute the born, for a specific helicity if asked so.
1611 call %(proc_prefix)ssmatrixhel(P_USER,USERHEL,ANS(0))
1612 """%matrix_element.rep_dict
1613 replace_dict['set_reference']=\
1614 """C We chose to use the born evaluation for the reference
1615 call %(proc_prefix)ssmatrix(p,ref)"""%matrix_element.rep_dict
1616 replace_dict['loop_induced_helas_calls'] = ""
1617 replace_dict['loop_induced_finalize'] = ""
1618 replace_dict['loop_induced_setup'] = ""
1619 replace_dict['nctamps_or_nloopamps']='nctamps'
1620 replace_dict['nbornamps_or_nloopamps']='nbornamps'
1621 replace_dict['squaring']='\n'.join(['DO K=1,3',
1622 'ANS(K)=ANS(K)+2.0d0*DBLE(CFTOT*AMPL(K,I)*DCONJG(AMP(J,H)))',
1623 'ENDDO'])
1624
1625
1626
1627
1628 writers.FortranWriter('nsquaredSO.inc').writelines(
1629 """INTEGER NSQUAREDSO
1630 PARAMETER (NSQUAREDSO=0)""")
1631
1632
1633
1634 actualize_ans=[]
1635 if matrix_element.get('processes')[0].get('has_born'):
1636 actualize_ans.append("DO I=NCTAMPS+1,NLOOPAMPS")
1637 actualize_ans.extend("ANS(%d)=ANS(%d)+AMPL(%d,I)"%(i,i,i) for i \
1638 in range(1,4))
1639 actualize_ans.append(\
1640 "IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN")
1641 actualize_ans.append(\
1642 "WRITE(*,*) '##W03 WARNING Contribution ',I,' is unstable.'")
1643 actualize_ans.extend(["ENDIF","ENDDO"])
1644 replace_dict['actualize_ans']='\n'.join(actualize_ans)
1645 else:
1646 replace_dict['actualize_ans']=\
1647 ("""C We add five powers to the reference value to loosen a bit the vanishing pole check.
1648 C IF(.NOT.(CHECKPHASE.OR.(.NOT.HELDOUBLECHECKED)).AND..NOT.%(proc_prefix)sISZERO(ABS(ANS(2))+ABS(ANS(3)),ABS(ANS(1))*(10.0d0**5),-1,H)) THEN
1649 C WRITE(*,*) '##W05 WARNING Found a PS point with a contribution to the single pole.'
1650 C WRITE(*,*) 'Finite contribution = ',ANS(1)
1651 C WRITE(*,*) 'single pole contribution = ',ANS(2)
1652 C WRITE(*,*) 'double pole contribution = ',ANS(3)
1653 C ENDIF""")%replace_dict
1654
1655
1656 (CMNum,CMDenom) = self.get_color_matrix(matrix_element)
1657 CMWriter=open(pjoin('..','MadLoop5_resources',
1658 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w')
1659 for ColorLine in CMNum:
1660 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
1661 CMWriter.close()
1662 CMWriter=open(pjoin('..','MadLoop5_resources',
1663 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w')
1664 for ColorLine in CMDenom:
1665 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
1666 CMWriter.close()
1667
1668
1669 HelConfigs=matrix_element.get_helicity_matrix()
1670 HelConfigWriter=open(pjoin('..','MadLoop5_resources',
1671 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w')
1672 for HelConfig in HelConfigs:
1673 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n')
1674 HelConfigWriter.close()
1675
1676
1677 loop_amp_helas_calls = fortran_model.get_loop_amp_helas_calls(\
1678 matrix_element)
1679
1680 loop_amp_helas_calls = [lc % matrix_element.rep_dict
1681 for lc in loop_amp_helas_calls]
1682
1683 born_ct_helas_calls, UVCT_helas_calls = \
1684 fortran_model.get_born_ct_helas_calls(matrix_element)
1685
1686
1687 born_ct_helas_calls = born_ct_helas_calls + UVCT_helas_calls
1688 file = open(os.path.join(self.template_dir,\
1689
1690 'loop_matrix_standalone.inc')).read()
1691
1692 if matrix_element.get('processes')[0].get('has_born'):
1693 toBeRepaced='loop_helas_calls'
1694 else:
1695 toBeRepaced='loop_induced_helas_calls'
1696
1697
1698 if (not noSplit and (len(matrix_element.get_all_amplitudes())>1000)):
1699 file=self.split_HELASCALLS(writer,replace_dict,\
1700 'helas_calls_split.inc',file,born_ct_helas_calls,\
1701 'born_ct_helas_calls','helas_calls_ampb')
1702 file=self.split_HELASCALLS(writer,replace_dict,\
1703 'helas_calls_split.inc',file,loop_amp_helas_calls,\
1704 toBeRepaced,'helas_calls_ampl')
1705 else:
1706 replace_dict['born_ct_helas_calls']='\n'.join(born_ct_helas_calls)
1707 replace_dict[toBeRepaced]='\n'.join(loop_amp_helas_calls)
1708
1709 file = file % replace_dict
1710
1711 loop_calls_finder = re.compile(r'^\s*CALL\S*LOOP\S*')
1712 n_loop_calls = len([call for call in loop_amp_helas_calls if not loop_calls_finder.match(call) is None])
1713 if writer:
1714
1715 writer.writelines(file)
1716 return n_loop_calls
1717 else:
1718
1719 return n_loop_calls, file
1720
1722 """Create the born_matrix.f file for the born process as for a standard
1723 tree-level computation."""
1724
1725 if not matrix_element.get('processes') or \
1726 not matrix_element.get('diagrams'):
1727 return 0
1728
1729 if not isinstance(writer, writers.FortranWriter):
1730 raise writers.FortranWriter.FortranWriterError(\
1731 "writer not FortranWriter")
1732
1733
1734
1735
1736
1737
1738
1739 bornME = helas_objects.HelasMatrixElement()
1740 for prop in bornME.keys():
1741 bornME.set(prop,copy.deepcopy(matrix_element.get(prop)))
1742 bornME.set('base_amplitude',None,force=True)
1743 bornME.set('diagrams',copy.deepcopy(\
1744 matrix_element.get_born_diagrams()))
1745 bornME.set('color_basis',copy.deepcopy(\
1746 matrix_element.get('born_color_basis')))
1747 bornME.set('color_matrix',copy.deepcopy(\
1748 color_amp.ColorMatrix(bornME.get('color_basis'))))
1749
1750
1751 bornME.optimization = True
1752 return super(LoopProcessExporterFortranSA,self).write_matrix_element_v4(
1753 writer, bornME, fortran_model,
1754 proc_prefix=matrix_element.rep_dict['proc_prefix'])
1755
1758 """ Writes out the code for the subroutine MP_BORN_AMPS_AND_WFS which
1759 computes just the external wavefunction and born amplitudes in
1760 multiple precision. """
1761
1762 if not matrix_element.get('processes') or \
1763 not matrix_element.get('diagrams'):
1764 return 0
1765
1766 replace_dict = copy.copy(matrix_element.rep_dict)
1767
1768
1769 if matrix_element.get('processes')[0].get('has_born'):
1770 replace_dict['h_w_suffix']=',H'
1771 else:
1772 replace_dict['h_w_suffix']=''
1773
1774
1775 born_amps_and_wfs_calls , uvct_amp_calls = \
1776 fortran_model.get_born_ct_helas_calls(matrix_element, include_CT=True)
1777
1778
1779 born_amps_and_wfs_calls = born_amps_and_wfs_calls + uvct_amp_calls
1780
1781
1782
1783 self.turn_to_mp_calls(born_amps_and_wfs_calls)
1784
1785 file = open(os.path.join(self.template_dir,\
1786 'mp_born_amps_and_wfs.inc')).read()
1787
1788 if (not noSplit and (len(matrix_element.get_all_amplitudes())>2000)):
1789 file=self.split_HELASCALLS(writer,replace_dict,\
1790 'mp_helas_calls_split.inc',file,\
1791 born_amps_and_wfs_calls,'born_amps_and_wfs_calls',\
1792 'mp_helas_calls')
1793 else:
1794 replace_dict['born_amps_and_wfs_calls']=\
1795 '\n'.join(born_amps_and_wfs_calls)
1796
1797 file = file % replace_dict
1798 if writer:
1799
1800 writer.writelines(file)
1801 else:
1802
1803 return file
1804
1805
1806
1807
1808
1810 """Class to take care of exporting a set of loop matrix elements in the
1811 Fortran format which exploits the Pozzorini method of representing
1812     the loop numerators as polynomials to render their evaluation faster."""
1813
1814 template_dir=os.path.join(_file_path,'iolibs/template_files/loop_optimized')
1815
1816
1817 forbid_loop_grouping = False
1818
1819
1820
1821
1822
1823
1824
1825 all_tir=['pjfry','iregi','ninja','golem','samurai','collier']
1826
1827     def __init__(self, dir_path = "", opt=None):
1828 """Initiate the LoopProcessOptimizedExporterFortranSA with directory
1829 information on where to find all the loop-related source files,
1830 like CutTools and TIR"""
1831
1832 super(LoopProcessOptimizedExporterFortranSA,self).__init__(dir_path, opt)
1833
1834
1835 self.tir_available_dict={'pjfry':True,'iregi':True,'golem':True,
1836 'samurai':True,'ninja':True,'collier':True}
1837
1838 for tir in self.all_tir:
1839 tir_dir="%s_dir"%tir
1840 if tir_dir in self.opt and not self.opt[tir_dir] is None:
1841
1842 tir_path = self.opt[tir_dir].strip()
1843 if tir_path.startswith('.'):
1844 tir_path = os.path.abspath(pjoin(MG5DIR,tir_path))
1845 setattr(self,tir_dir,tir_path)
1846 else:
1847 setattr(self,tir_dir,'')
1848
1856
1857     def get_context(self,matrix_element, **opts):
1858 """ Additional contextual information which needs to be created for
1859 the optimized output."""
1860
1861 context = LoopProcessExporterFortranSA.get_context(self, matrix_element,
1862 **opts)
1863
1864
1865 try:
1866 context['ninja_supports_quad_prec'] = \
1867 misc.get_ninja_quad_prec_support(getattr(self,'ninja_dir'))
1868 except AttributeError:
1869 context['ninja_supports_quad_prec'] = False
1870
1871 for tir in self.all_tir:
1872 context['%s_available'%tir]=self.tir_available_dict[tir]
1873
1874 if tir not in ['golem','pjfry','iregi','samurai','ninja','collier']:
1875                 raise MadGraph5Error("%s is not a TIR library currently interfaced."%tir)
1876
1877 return context
1878
1880 """ Perform additional actions specific for this class when setting
1881 up the template with the copy_template function."""
1882
1883
1884 link_tir_libs=[]
1885 tir_libs=[]
1886 tir_include=[]
1887
1888 for tir in self.all_tir:
1889 tir_dir="%s_dir"%tir
1890 libpath=getattr(self,tir_dir)
1891 libname="lib%s.a"%tir
1892 tir_name=tir
1893 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'),
1894 libpath,libname,tir_name=tir_name)
1895 if libpath != "":
1896 if tir in ['ninja','pjfry','golem','samurai','collier']:
1897
1898 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir))
1899 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir))
1900
1901 if tir in ['ninja']:
1902 if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext))
1903 for ext in ['a','dylib','so']):
1904 raise MadGraph5Error(
1905                         "The OneLOop library 'libavh_olo.(a|dylib|so)' could not be found in path '%s'. Please place a symlink to it there."%libpath)
1906 link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo'))
1907 tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo'))
1908 if tir in ['ninja','golem', 'samurai','collier']:
1909 trgt_path = pjoin(os.path.dirname(libpath),'include')
1910 if os.path.isdir(trgt_path):
1911 to_include = misc.find_includes_path(trgt_path,
1912 self.include_names[tir])
1913 else:
1914 to_include = None
1915
1916 if to_include is None and tir=='collier':
1917 to_include = misc.find_includes_path(
1918 pjoin(libpath,'modules'),self.include_names[tir])
1919 if to_include is None:
1920 logger.error(
1921 'Could not find the include directory for %s, looking in %s.\n' % (tir, str(trgt_path))+
1922 'Generation carries on but you will need to edit the include path by hand in the makefiles.')
1923 to_include = '<Not_found_define_it_yourself>'
1924 tir_include.append('-I %s'%str(to_include))
1925
1926
1927
1928
1929 name_map = {'golem':'golem95','samurai':'samurai',
1930 'ninja':'ninja','collier':'collier'}
1931 ln(to_include, starting_dir=pjoin(self.dir_path,'lib'),
1932 name='%s_include'%name_map[tir],abspath=True)
1933 ln(libpath, starting_dir=pjoin(self.dir_path,'lib'),
1934 name='%s_lib'%name_map[tir],abspath=True)
1935 else :
1936 link_tir_libs.append('-l%s'%tir)
1937 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir)
1938
1939 MadLoop_makefile_definitions = pjoin(self.dir_path,'SubProcesses',
1940 'MadLoop_makefile_definitions')
1941 if os.path.isfile(MadLoop_makefile_definitions):
1942 os.remove(MadLoop_makefile_definitions)
1943
1944 calls = self.write_loop_makefile_definitions(
1945 writers.MakefileWriter(MadLoop_makefile_definitions),
1946 link_tir_libs,tir_libs, tir_include=tir_include)
1947
1948
1949
1950 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone',
1951 "SubProcesses","MadLoopCommons.inc")).read()
1952 writer = writers.FortranWriter(os.path.join(self.dir_path,
1953 "SubProcesses","MadLoopCommons.f"))
1954 writer.writelines(MadLoopCommon%{
1955 'print_banner_commands':self.MadLoop_banner}, context={
1956 'collier_available':self.tir_available_dict['collier']})
1957 writer.close()
1958
1960 """ Does the same as the mother routine except that it also links
1961 coef_specs.inc in the HELAS folder."""
1962
1963 LoopProcessExporterFortranSA.link_files_from_Subprocesses(self,proc_name)
1964
1965
1966
1967 ln(os.path.join(self.dir_path,'Source','DHELAS','coef_specs.inc'),
1968 os.path.join(self.dir_path, 'SubProcesses', proc_name),
1969 abspath=False, cwd=None)
1970
1971
1972     def link_TIR(self, targetPath,libpath,libname,tir_name='TIR'):
1973 """Link the TIR source directory inside the target path given
1974 in argument"""
1975
1976 if tir_name in ['pjfry','golem','samurai','ninja','collier']:
1977
1978 if (not isinstance(libpath,str)) or (not os.path.exists(libpath)) \
1979 or (not os.path.isfile(pjoin(libpath,libname))):
1980 if isinstance(libpath,str) and libpath != '' and \
1981 (not os.path.isfile(pjoin(libpath,libname))):
1982
1983 logger.warning("The %s reduction library could not be found"%tir_name\
1984 +" with PATH:%s specified in mg5_configuration.txt."%libpath\
1985 +" It will not be available.")
1986 self.tir_available_dict[tir_name]=False
1987 return ""
1988
1989 if tir_name in ['ninja','samurai'] and self.tir_available_dict[tir_name]:
1990
1991
1992 if os.path.isfile(pjoin(libpath,os.pardir,'AUTHORS')):
1993 try:
1994 version = open(pjoin(libpath,os.pardir,'VERSION'),'r').read()
1995 except IOError:
1996 version = None
1997 if version is None :
1998 logger.warning(
1999 "Your version of '%s' in \n %s\nseems too old %sto be compatible with MG5_aMC."
2000 %(tir_name, libpath ,'' if not version else '(v%s) '%version)+
2001 ("\nConsider updating it by hand or using the 'install' function of MG5_aMC." if tir_name!='samurai'
2002 else "\nAsk the authors for the latest version compatible with MG5_aMC."))
2003 else:
2004
2005 if (not isinstance(libpath,str)) or (not os.path.exists(libpath)):
2006
2007 logger.warning("The %s reduction library could not be found"%tir_name\
2008 +" with PATH:%s specified in mg5_configuration.txt."%libpath\
2009 +" It will not be available.")
2010 self.tir_available_dict[tir_name]=False
2011 return ""
2012
2013 if self.dependencies=='internal':
2014 if tir_name in ['pjfry','golem','samurai','ninja','collier']:
2015 self.tir_available_dict[tir_name]=False
2016 logger.info("When using the 'output_dependencies=internal' "+\
2017 " MG5_aMC option, the (optional) reduction library %s cannot be employed because"%tir_name+\
2018 " it is not distributed with the MG5_aMC code so that it cannot be copied locally.")
2019 return ""
2020 elif tir_name == "iregi":
2021
2022 new_iregi_path = pjoin(targetPath,os.path.pardir,'Source','IREGI')
2023 shutil.copytree(pjoin(libpath,os.path.pardir), new_iregi_path,
2024 symlinks=True)
2025
2026 current = misc.detect_current_compiler(
2027 pjoin(new_iregi_path,'src','makefile_ML5_lib'))
2028 new = 'gfortran' if self.fortran_compiler is None else \
2029 self.fortran_compiler
2030 if current != new:
2031 misc.mod_compilator(pjoin(new_iregi_path,'src'), new,current)
2032 misc.mod_compilator(pjoin(new_iregi_path,'src','oneloop'),
2033 new, current)
2034
2035
2036 ln(pjoin(targetPath,os.path.pardir,'Source','IREGI','src',
2037 libname),targetPath)
2038 else:
2039 logger.info("Tensor integral reduction library "+\
2040 "%s not implemented yet."%tir_name)
2041 return libpath
2042
2043 elif self.dependencies=='external':
2044 if not os.path.exists(pjoin(libpath,libname)) and tir_name=='iregi':
2045
2046 if 'heptools_install_dir' in self.opt and os.path.exists(pjoin(self.opt['heptools_install_dir'], 'IREGI')):
2047 misc.sprint('Going to use pre-compiled version of IREGI')
2048
2049 ln(os.path.join(self.opt['heptools_install_dir'],'IREGI','src','libiregi.a'),
2050 os.path.join(targetPath),abspath=True)
2051 return os.path.join(targetPath, 'libiregi.a')
2052
2053
2054
2055 logger.info('Compiling IREGI. This has to be done only once and'+\
2056 ' can take a couple of minutes.','$MG:BOLD')
2057
2058 current = misc.detect_current_compiler(os.path.join(\
2059 libpath,'makefile_ML5_lib'))
2060 new = 'gfortran' if self.fortran_compiler is None else \
2061 self.fortran_compiler
2062 if current != new:
2063 misc.mod_compilator(libpath, new,current)
2064 misc.mod_compilator(pjoin(libpath,'oneloop'), new, current)
2065
2066 misc.compile(cwd=libpath, job_specs = False)
2067
2068 if not os.path.exists(pjoin(libpath,libname)):
2069                 logger.warning("IREGI could not be compiled. Check "+\
2070 "the compilation errors at %s. The related "%libpath+\
2071 "functionalities are turned off.")
2072 self.tir_available_dict[tir_name]=False
2073 return ""
2074
2075 if not tir_name in ['pjfry','golem','samurai','ninja','collier']:
2076 ln(os.path.join(libpath,libname),targetPath,abspath=True)
2077
2078 elif self.dependencies=='environment_paths':
2079
2080
2081 newlibpath = misc.which_lib(libname)
2082 if not newlibpath is None:
2083 logger.info('MG5_aMC is using %s installation found at %s.'%\
2084 (tir_name,newlibpath))
2085
2086 if not tir_name in ['pjfry','golem','samurai','ninja','collier']:
2087 ln(newlibpath,targetPath,abspath=True)
2088 self.tir_available_dict[tir_name]=True
2089 return os.path.dirname(newlibpath)
2090 else:
2091 logger.warning("Could not find the location of the file"+\
2092                   " %s in your environment paths. The related "%libname+\
2093 "functionalities are turned off.")
2094 self.tir_available_dict[tir_name]=False
2095 return ""
2096
2097 self.tir_available_dict[tir_name]=True
2098 return libpath
2099
2101 """ Decides whether we must group loops or not for this matrix element"""
2102
2103
2104
2105 if self.forbid_loop_grouping:
2106 self.group_loops = False
2107 else:
2108 self.group_loops = (not self.get_context(matrix_element)['ComputeColorFlows'])\
2109 and matrix_element.get('processes')[0].get('has_born')
2110
2111 return self.group_loops
2112
2113     def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
2119
2120
2121
2122     def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model,
2123 group_number = None, proc_id = None, config_map = None):
2124         """ Writes loop_matrix.f, CT_interface.f, TIR_interface.f, GOLEM_interface.f
2125 and loop_num.f only but with the optimized FortranModel.
2126 The arguments group_number and proc_id are just for the LoopInduced
2127 output with MadEvent and only used in get_ME_identifier."""
2128
2129
2130
2131 if writer:
2132 raise MadGraph5Error('Matrix output mode no longer supported.')
2133
2134 if not isinstance(fortran_model,\
2135 helas_call_writers.FortranUFOHelasCallWriter):
2136 raise MadGraph5Error('The optimized loop fortran output can only'+\
2137 ' work with a UFO Fortran model')
2138 OptimizedFortranModel=\
2139 helas_call_writers.FortranUFOHelasCallWriterOptimized(\
2140 fortran_model.get('model'),False)
2141
2142
2143 if not matrix_element.get('processes')[0].get('has_born') and \
2144 not self.compute_color_flows:
2145 logger.debug("Color flows will be employed despite the option"+\
2146 " 'loop_color_flows' being set to False because it is necessary"+\
2147 " for optimizations.")
2148
2149
2150
2151
2152
2153 matrix_element.compute_all_analytic_information(
2154 self.get_aloha_model(matrix_element.get('processes')[0].get('model')))
2155
2156 self.set_group_loops(matrix_element)
2157
2158
2159
2160 matrix_element.rep_dict = LoopProcessExporterFortranSA.\
2161 generate_general_replace_dict(self, matrix_element,
2162 group_number = group_number, proc_id = proc_id)
2163
2164
2165 self.set_optimized_output_specific_replace_dict_entries(matrix_element)
2166
2167
2168 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w')
2169 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix'])
2170 proc_prefix_writer.close()
2171
2172 filename = 'loop_matrix.f'
2173 calls = self.write_loopmatrix(writers.FortranWriter(filename),
2174 matrix_element,
2175 OptimizedFortranModel)
2176
2177 filename = 'check_sa.f'
2178 self.write_check_sa(writers.FortranWriter(filename),matrix_element)
2179
2180 filename = 'polynomial.f'
2181 calls = self.write_polynomial_subroutines(
2182 writers.FortranWriter(filename),
2183 matrix_element)
2184
2185 filename = 'improve_ps.f'
2186 calls = self.write_improve_ps(writers.FortranWriter(filename),
2187 matrix_element)
2188
2189 filename = 'CT_interface.f'
2190 self.write_CT_interface(writers.FortranWriter(filename),\
2191 matrix_element)
2192
2193 filename = 'TIR_interface.f'
2194 self.write_TIR_interface(writers.FortranWriter(filename),
2195 matrix_element)
2196
2197 if 'golem' in self.tir_available_dict and self.tir_available_dict['golem']:
2198 filename = 'GOLEM_interface.f'
2199 self.write_GOLEM_interface(writers.FortranWriter(filename),
2200 matrix_element)
2201
2202 if 'collier' in self.tir_available_dict and self.tir_available_dict['collier']:
2203 filename = 'COLLIER_interface.f'
2204 self.write_COLLIER_interface(writers.FortranWriter(filename),
2205 matrix_element)
2206
2207 filename = 'loop_num.f'
2208 self.write_loop_num(writers.FortranWriter(filename),\
2209 matrix_element,OptimizedFortranModel)
2210
2211 filename = 'mp_compute_loop_coefs.f'
2212 self.write_mp_compute_loop_coefs(writers.FortranWriter(filename),\
2213 matrix_element,OptimizedFortranModel)
2214
2215 if self.get_context(matrix_element)['ComputeColorFlows']:
2216 filename = 'compute_color_flows.f'
2217 self.write_compute_color_flows(writers.FortranWriter(filename),
2218 matrix_element, config_map = config_map)
2219
2220
2221 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
2222 filename = 'nexternal.inc'
2223 self.write_nexternal_file(writers.FortranWriter(filename),
2224 nexternal, ninitial)
2225
2226
2227 filename = 'process_info.inc'
2228 self.write_process_info_file(writers.FortranWriter(filename),
2229 matrix_element)
2230
2231 if self.get_context(matrix_element)['TIRCaching']:
2232 filename = 'tir_cache_size.inc'
2233 self.write_tir_cache_size_include(writers.FortranWriter(filename))
2234
2235 return calls
2236
2238 """ Specify the entries of the replacement dictionary which are specific
2239 to the optimized output and only relevant to it (the more general entries
2240         are set in the mother class LoopProcessExporterFortranSA)."""
2241
2242 max_loop_rank=matrix_element.get_max_loop_rank()
2243 matrix_element.rep_dict['maxrank']=max_loop_rank
2244 matrix_element.rep_dict['loop_max_coefs']=\
2245 q_polynomial.get_number_of_coefs_for_rank(max_loop_rank)
2246 max_loop_vertex_rank=matrix_element.get_max_loop_vertex_rank()
2247 matrix_element.rep_dict['vertex_max_coefs']=\
2248 q_polynomial.get_number_of_coefs_for_rank(max_loop_vertex_rank)
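        # For reference (assuming q_polynomial counts the independent
        # coefficients of a symmetric rank-r tensor in the four loop-momentum
        # components), get_number_of_coefs_for_rank(r) = binomial(r+4,4),
        # e.g. rank 0 -> 1, rank 1 -> 5, rank 2 -> 15, rank 3 -> 35.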
2249
2250 matrix_element.rep_dict['nloopwavefuncs']=\
2251 matrix_element.get_number_of_loop_wavefunctions()
2252 max_spin=matrix_element.get_max_loop_particle_spin()
2253
2254 matrix_element.rep_dict['max_lwf_size']= 4 if max_spin <=3 else 16
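        # Note: 'spin' follows the 2s+1 convention, so max_spin <= 3 covers
        # scalars, fermions and vectors (4-component loop wavefunctions),
        # while spin-3/2 and spin-2 loop particles get the larger 16-component
        # size (this mirrors the spin_to_wf_size map used in fix_coef_specs).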
2255 matrix_element.rep_dict['nloops']=len(\
2256 [1 for ldiag in matrix_element.get_loop_diagrams() for \
2257 lamp in ldiag.get_loop_amplitudes()])
2258
2259 if self.set_group_loops(matrix_element):
2260 matrix_element.rep_dict['nloop_groups']=\
2261 len(matrix_element.get('loop_groups'))
2262 else:
2263 matrix_element.rep_dict['nloop_groups']=\
2264 matrix_element.rep_dict['nloops']
2265
2267 """ Create the file containing the core subroutine called by CutTools
2268 which contains the Helas calls building the loop"""
2269
2270 replace_dict=copy.copy(matrix_element.rep_dict)
2271
2272 file = open(os.path.join(self.template_dir,'loop_num.inc')).read()
2273 file = file % replace_dict
2274 writer.writelines(file,context=self.get_context(matrix_element))
2275
2280
2282         """ Create the file TIR_interface.f, which contains the general TIR
2283         interfacing subroutine but NOT the subroutines defining the loop
2284         HELAS-like calls. """
2285
2286
2287 replace_dict=copy.copy(matrix_element.rep_dict)
2288
2289 file = open(os.path.join(self.template_dir,'TIR_interface.inc')).read()
2290
2291
2292
2293 loop_groups = matrix_element.get('loop_groups')
2294 has_HEFT_vertex = [False]*len(loop_groups)
2295 for i, (denom_structure, loop_amp_list) in enumerate(loop_groups):
2296 for lamp in loop_amp_list:
2297 final_lwf = lamp.get_final_loop_wavefunction()
2298 while not final_lwf is None:
2299
2300
2301 scalars = len([1 for wf in final_lwf.get('mothers') if
2302 wf.get('spin')==1])
2303 vectors = len([1 for wf in final_lwf.get('mothers') if
2304 wf.get('spin')==3 and wf.get('mass')=='ZERO'])
2305 if scalars>=1 and vectors>=1 and \
2306 scalars+vectors == len(final_lwf.get('mothers')):
2307 has_HEFT_vertex[i] = True
2308 break
2309 final_lwf = final_lwf.get_loop_mother()
2310 else:
2311 continue
2312 break
2313
2314 has_HEFT_list = []
2315 chunk_size = 9
2316 for k in range(0, len(has_HEFT_vertex), chunk_size):
2317 has_HEFT_list.append("DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % \
2318 (k + 1, min(k + chunk_size, len(has_HEFT_vertex)),
2319 ','.join(['.TRUE.' if l else '.FALSE.' for l in
2320 has_HEFT_vertex[k:k + chunk_size]])))
2321 replace_dict['has_HEFT_list'] = '\n'.join(has_HEFT_list)
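        # For illustration, each chunk above becomes a Fortran DATA statement
        # of the form (truth values are an example only, for a process with
        # three loop groups):
        #   DATA (HAS_AN_HEFT_VERTEX(I),I=     1,     3) /.TRUE.,.FALSE.,.FALSE./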
2322
2323 file = file % replace_dict
2324
2325 FPR = q_polynomial.FortranPolynomialRoutines(
2326 replace_dict['maxrank'],coef_format=replace_dict['complex_dp_format'],\
2327 sub_prefix=replace_dict['proc_prefix'])
2328 if self.tir_available_dict['pjfry']:
2329 file += '\n\n'+FPR.write_pjfry_mapping()
2330 if self.tir_available_dict['iregi']:
2331 file += '\n\n'+FPR.write_iregi_mapping()
2332
2333 if writer:
2334 writer.writelines(file,context=self.get_context(matrix_element))
2335 else:
2336 return file
2337
2339 """ Create the file COLLIER_interface.f"""
2340
2341
2342 replace_dict=copy.copy(matrix_element.rep_dict)
2343
2344 file = open(os.path.join(self.template_dir,'COLLIER_interface.inc')).read()
2345
2346 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\
2347 coef_format=replace_dict['complex_dp_format'],\
2348 sub_prefix=replace_dict['proc_prefix'])
2349 map_definition = []
2350 collier_map = FPR.get_COLLIER_mapping()
2351
2352 chunk_size = 10
2353 for map_name, indices_list in \
2354 [('COEFMAP_ZERO',[c[0] for c in collier_map]),
2355 ('COEFMAP_ONE',[c[1] for c in collier_map]),
2356 ('COEFMAP_TWO',[c[2] for c in collier_map]),
2357 ('COEFMAP_THREE',[c[3] for c in collier_map])]:
2358 for k in range(0, len(indices_list), chunk_size):
2359 map_definition.append("DATA (%s(I),I=%3r,%3r) /%s/" % \
2360 (map_name,k, min(k + chunk_size, len(indices_list))-1,
2361 ','.join('%2r'%ind for ind in indices_list[k:k + chunk_size])))
2362
2363 replace_dict['collier_coefmap'] = '\n'.join(map_definition)
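        # For illustration, the chunks above become zero-indexed DATA
        # statements such as (values are an example only):
        #   DATA (COEFMAP_ZERO(I),I=  0,  9) / 0, 1, 0, 0, 0, 1, 2, 0, 1, 0/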
2364
2365 file = file % replace_dict
2366
2367 if writer:
2368 writer.writelines(file,context=self.get_context(matrix_element))
2369 else:
2370 return file
2371
2373         """ Create the file GOLEM_interface.f, which contains the general GOLEM95
2374         interfacing subroutine but NOT the subroutines defining the loop
2375         HELAS-like calls. """
2376
2377
2378 replace_dict=copy.copy(matrix_element.rep_dict)
2379
2380
2381
2382 if not self.get_context(matrix_element)['AmplitudeReduction']:
2383 replace_dict['loop_induced_sqsoindex']=',SQSOINDEX'
2384 else:
2385 replace_dict['loop_induced_sqsoindex']=''
2386
2387 file = open(os.path.join(self.template_dir,'GOLEM_interface.inc')).read()
2388
2389 file = file % replace_dict
2390
2391 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\
2392 coef_format=replace_dict['complex_dp_format'],\
2393 sub_prefix=replace_dict['proc_prefix'])
2394
2395 file += '\n\n'+FPR.write_golem95_mapping()
2396
2397 if writer:
2398 writer.writelines(file,context=self.get_context(matrix_element))
2399 else:
2400 return file
2401
2403 """ Subroutine to create all the subroutines relevant for handling
2404 the polynomials representing the loop numerator """
2405
2406
2407 IncWriter=writers.FortranWriter('loop_max_coefs.inc','w')
2408 IncWriter.writelines("""INTEGER LOOPMAXCOEFS
2409 PARAMETER (LOOPMAXCOEFS=%(loop_max_coefs)d)"""
2410                              %matrix_element.rep_dict)
2411         IncWriter.close()
2412
2413
2414
2415
2416 coef_specs_path = pjoin(self.dir_path, 'Source','DHELAS','coef_specs.inc')
2417 if not os.path.isfile(coef_specs_path):
2418 IncWriter=writers.FortranWriter(coef_specs_path,'w')
2419 IncWriter.writelines("""INTEGER MAXLWFSIZE
2420 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d)
2421 INTEGER VERTEXMAXCOEFS
2422 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\
2423 %matrix_element.rep_dict)
2424 IncWriter.close()
2425
2426
2427 subroutines=[]
2428
2429
2430 replace_dict = copy.copy(matrix_element.rep_dict)
2431
2432 dp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read()
2433 mp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read()
2434
2435
2436 replace_dict['complex_format'] = replace_dict['complex_dp_format']
2437 replace_dict['real_format'] = replace_dict['real_dp_format']
2438 replace_dict['mp_prefix'] = ''
2439 replace_dict['kind'] = 8
2440 replace_dict['zero_def'] = '0.0d0'
2441 replace_dict['one_def'] = '1.0d0'
2442 dp_routine = dp_routine % replace_dict
2443
2444 replace_dict['complex_format'] = replace_dict['complex_mp_format']
2445 replace_dict['real_format'] = replace_dict['real_mp_format']
2446 replace_dict['mp_prefix'] = 'MP_'
2447 replace_dict['kind'] = 16
2448 replace_dict['zero_def'] = '0.0e0_16'
2449 replace_dict['one_def'] = '1.0e0_16'
2450 mp_routine = mp_routine % replace_dict
2451 subroutines.append(dp_routine)
2452 subroutines.append(mp_routine)
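        # The same 'polynomial.inc' template is thus instantiated twice: once
        # in double precision (kind=8, no prefix) and once in quadruple
        # precision (kind=16, with the MP_ prefix on the subroutine names).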
2453
2454
2455 poly_writer=q_polynomial.FortranPolynomialRoutines(
2456 matrix_element.get_max_loop_rank(),
2457 updater_max_rank = matrix_element.get_max_loop_vertex_rank(),
2458 sub_prefix=replace_dict['proc_prefix'],
2459 proc_prefix=replace_dict['proc_prefix'],
2460 mp_prefix='')
2461
2462 writer.writelines(poly_writer.write_polynomial_constant_module()+'\n')
2463
2464 mp_poly_writer=q_polynomial.FortranPolynomialRoutines(
2465 matrix_element.get_max_loop_rank(),
2466 updater_max_rank = matrix_element.get_max_loop_vertex_rank(),
2467 coef_format='complex*32', sub_prefix='MP_'+replace_dict['proc_prefix'],
2468 proc_prefix=replace_dict['proc_prefix'], mp_prefix='MP_')
2469
2470 subroutines.append(poly_writer.write_polynomial_evaluator())
2471 subroutines.append(mp_poly_writer.write_polynomial_evaluator())
2472
2473 subroutines.append(poly_writer.write_add_coefs())
2474 subroutines.append(mp_poly_writer.write_add_coefs())
2475
2476 subroutines.append(poly_writer.write_wl_merger())
2477 subroutines.append(mp_poly_writer.write_wl_merger())
2478 for wl_update in matrix_element.get_used_wl_updates():
2479
2480
2481
2482
2483 if wl_update[0]==wl_update[1]==1 or wl_update[0]==0 or wl_update[1]==0:
2484
2485
2486
2487 subroutines.append(poly_writer.write_expanded_wl_updater(\
2488 wl_update[0],wl_update[1]))
2489 subroutines.append(mp_poly_writer.write_expanded_wl_updater(\
2490 wl_update[0],wl_update[1]))
2491 elif wl_update[0] >= wl_update[1]:
2492
2493
2494
2495
2496 subroutines.append(poly_writer.write_compact_wl_updater(\
2497 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True))
2498 subroutines.append(mp_poly_writer.write_compact_wl_updater(\
2499 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True))
2500 else:
2501
2502
2503
2504
2505
2506
2507 subroutines.append(poly_writer.write_compact_wl_updater(\
2508 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False))
2509 subroutines.append(mp_poly_writer.write_compact_wl_updater(\
2510 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False))
2511
2512 writer.writelines('\n\n'.join(subroutines),
2513 context=self.get_context(matrix_element))
2514
2516         """Create the mp_compute_loop_coefs.f file."""
2517
2518 if not matrix_element.get('processes') or \
2519 not matrix_element.get('diagrams'):
2520 return 0
2521
2522
2523
2524 writers.FortranWriter.downcase = False
2525
2526 replace_dict = copy.copy(matrix_element.rep_dict)
2527
2528
2529 squared_orders = matrix_element.get_squared_order_contribs()
2530 split_orders = matrix_element.get('processes')[0].get('split_orders')
2531
2532 born_ct_helas_calls , uvct_helas_calls = \
2533 fortran_model.get_born_ct_helas_calls(matrix_element,
2534 squared_orders=squared_orders, split_orders=split_orders)
2535 self.turn_to_mp_calls(born_ct_helas_calls)
2536 self.turn_to_mp_calls(uvct_helas_calls)
2537 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\
2538 matrix_element,group_loops=self.group_loops,
2539 squared_orders=squared_orders,split_orders=split_orders)
2540
2541 coef_construction = [c % matrix_element.rep_dict for c
2542 in coef_construction]
2543 self.turn_to_mp_calls(coef_construction)
2544 self.turn_to_mp_calls(coef_merging)
2545
2546 file = open(os.path.join(self.template_dir,\
2547 'mp_compute_loop_coefs.inc')).read()
2548
2549
2550
2551 context = self.get_context(matrix_element)
2552 file=self.split_HELASCALLS(writer,replace_dict,\
2553 'mp_helas_calls_split.inc',file,born_ct_helas_calls,\
2554 'mp_born_ct_helas_calls','mp_helas_calls_ampb',
2555 required_so_broadcaster = 'MP_CT_REQ_SO_DONE',
2556 continue_label = 2000,
2557 momenta_array_name = 'MP_P',
2558 context=context)
2559 file=self.split_HELASCALLS(writer,replace_dict,\
2560 'mp_helas_calls_split.inc',file,uvct_helas_calls,\
2561 'mp_uvct_helas_calls','mp_helas_calls_uvct',
2562 required_so_broadcaster = 'MP_UVCT_REQ_SO_DONE',
2563 continue_label = 3000,
2564 momenta_array_name = 'MP_P',
2565 context=context)
2566 file=self.split_HELASCALLS(writer,replace_dict,\
2567 'mp_helas_calls_split.inc',file,coef_construction,\
2568 'mp_coef_construction','mp_coef_construction',
2569 required_so_broadcaster = 'MP_LOOP_REQ_SO_DONE',
2570 continue_label = 4000,
2571 momenta_array_name = 'MP_P',
2572 context=context)
2573
2574 replace_dict['mp_coef_merging']='\n'.join(coef_merging)
2575
2576 file = file % replace_dict
2577
2578
2579 writer.writelines(file,context=context)
2580
2582 """Writes out the files (Loop|Born)ColorFlowMatrix.dat corresponding
2583 to the color coefficients for JAMP(L|B)*JAMP(L|B)."""
2584
2585 res = []
2586 for line in range(len(col_matrix._col_basis1)):
2587 numerators = []
2588 denominators = []
2589 for row in range(len(col_matrix._col_basis2)):
2590 coeff = col_matrix.col_matrix_fixed_Nc[(line,row)]
2591 numerators.append('%6r'%coeff[0].numerator)
2592 denominators.append('%6r'%(
2593 coeff[0].denominator*(-1 if coeff[1] else 1)))
2594 res.append(' '.join(numerators))
2595 res.append(' '.join(denominators))
2596
2597 res.append('EOF')
2598
2599 writer.writelines('\n'.join(res))
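        # Layout of the resulting .dat file: for every row of the color matrix
        # two lines are written, first the numerators and then the signed
        # denominators of the color coefficients, and the file is closed by a
        # final 'EOF' line.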
2600
2603         """ Writes the file '(Loop|Born)ColorFlowCoefs.dat' using the coefficients
2604 list of the color_amplitudes in the argument of this function."""
2605
2606 my_cs = color.ColorString()
2607
2608 res = []
2609
2610 for jamp_number, coeff_list in enumerate(color_amplitudes):
2611 my_cs.from_immutable(sorted(color_basis.keys())[jamp_number])
2612
2613 ordered_cs = color.ColorFactor([my_cs]).full_simplify()[0]
2614 res.append('%d # Coefficient for flow number %d with expr. %s'\
2615 %(len(coeff_list), jamp_number+1, repr(ordered_cs)))
2616
2617 line_element = []
2618
2619 for (coefficient, amp_number) in coeff_list:
2620 coef = self.cat_coeff(\
2621 coefficient[0],coefficient[1],coefficient[2],coefficient[3])
2622 line_element.append((coef[0].numerator,
2623 coef[0].denominator*(-1 if coef[1] else 1),amp_number))
2624
2625 line_element.sort(key=lambda el:el[2])
2626
2627 for i in range(3):
2628 res.append(' '.join('%6r'%elem[i] for elem in line_element))
2629
2630 res.append('EOF')
2631 writer.writelines('\n'.join(res))
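        # Layout of the resulting .dat file: for every color flow, a header
        # line gives the number of contributing amplitudes (with the flow
        # expression as a trailing comment), followed by three lines listing
        # the numerators, the signed denominators and the amplitude numbers;
        # the file is closed by a final 'EOF' line.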
2632
2634 """Writes the file compute_color_flows.f which uses the AMPL results
2635 from a common block to project them onto the color flow space so as
2636 to compute the JAMP quantities. For loop induced processes, this file
2637 will also contain a subroutine computing AMPL**2 for madevent
2638 multichanneling."""
2639
2640 loop_col_amps = matrix_element.get_loop_color_amplitudes()
2641 matrix_element.rep_dict['nLoopFlows'] = len(loop_col_amps)
2642
2643 dat_writer = open(pjoin('..','MadLoop5_resources',
2644 '%(proc_prefix)sLoopColorFlowCoefs.dat'
2645 %matrix_element.rep_dict),'w')
2646 self.write_color_flow_coefs_data_file(dat_writer,
2647 loop_col_amps, matrix_element.get('loop_color_basis'))
2648 dat_writer.close()
2649
2650 dat_writer = open(pjoin('..','MadLoop5_resources',
2651 '%(proc_prefix)sLoopColorFlowMatrix.dat'
2652 %matrix_element.rep_dict),'w')
2653 self.write_color_matrix_data_file(dat_writer,
2654 matrix_element.get('color_matrix'))
2655 dat_writer.close()
2656
2657 if matrix_element.get('processes')[0].get('has_born'):
2658 born_col_amps = matrix_element.get_born_color_amplitudes()
2659 matrix_element.rep_dict['nBornFlows'] = len(born_col_amps)
2660 dat_writer = open(pjoin('..','MadLoop5_resources',
2661 '%(proc_prefix)sBornColorFlowCoefs.dat'
2662 %matrix_element.rep_dict),'w')
2663 self.write_color_flow_coefs_data_file(dat_writer,
2664 born_col_amps, matrix_element.get('born_color_basis'))
2665 dat_writer.close()
2666
2667 dat_writer = open(pjoin('..','MadLoop5_resources',
2668 '%(proc_prefix)sBornColorFlowMatrix.dat'
2669 %matrix_element.rep_dict),'w')
2670 self.write_color_matrix_data_file(dat_writer,
2671 color_amp.ColorMatrix(matrix_element.get('born_color_basis')))
2672 dat_writer.close()
2673 else:
2674 matrix_element.rep_dict['nBornFlows'] = 0
2675
2676 replace_dict = copy.copy(matrix_element.rep_dict)
2677
2678
2679
2680 if self.get_context(matrix_element)['MadEventOutput']:
2681 self.get_amp2_lines(matrix_element, replace_dict, config_map)
2682 else:
2683 replace_dict['config_map_definition'] = ''
2684 replace_dict['config_index_map_definition'] = ''
2685 replace_dict['nmultichannels'] = 0
2686 replace_dict['nmultichannel_configs'] = 0
2687
2688
2689
2690 matrix_element.rep_dict['nmultichannels'] = \
2691 replace_dict['nmultichannels']
2692 matrix_element.rep_dict['nmultichannel_configs'] = \
2693 replace_dict['nmultichannel_configs']
2694
2695
2696 file = open(os.path.join(self.template_dir,\
2697 'compute_color_flows.inc')).read()%replace_dict
2698
2699 writer.writelines(file,context=self.get_context(matrix_element))
2700
2702         """ From the list of matrix elements, or the single matrix element, derive
2703         the global quantities to write in global_specs.inc."""
2704
2705 if isinstance(matrix_element_list, (group_subprocs.SubProcessGroupList,
2706 loop_helas_objects.LoopHelasProcess)):
2707 matrix_element_list = matrix_element_list.get_matrix_elements()
2708
2709 if isinstance(matrix_element_list, list):
2710 me_list = matrix_element_list
2711 else:
2712 me_list = [matrix_element_list]
2713
2714 if output_path is None:
2715 out_path = pjoin(self.dir_path,'SubProcesses','global_specs.inc')
2716 else:
2717 out_path = output_path
2718
2719 open(out_path,'w').write(
2720 """ integer MAXNEXTERNAL
2721 parameter(MAXNEXTERNAL=%d)
2722 integer OVERALLMAXRANK
2723 parameter(OVERALLMAXRANK=%d)
2724 integer NPROCS
2725 parameter(NPROCS=%d)"""%(
2726 max(me.get_nexternal_ninitial()[0] for me in me_list),
2727 max(me.get_max_loop_rank() for me in me_list),
2728 len(me_list)))
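        # For illustration, the generated global_specs.inc would read, for a
        # hypothetical set of processes:
        #    integer MAXNEXTERNAL
        #    parameter(MAXNEXTERNAL=5)
        #    integer OVERALLMAXRANK
        #    parameter(OVERALLMAXRANK=4)
        #    integer NPROCS
        #    parameter(NPROCS=2)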
2729
2730
2731     def fix_coef_specs(self, overall_max_lwf_spin, overall_max_loop_vert_rank):
2732 """ If processes with different maximum loop wavefunction size or
2733 different maximum loop vertex rank have to be output together, then
2734         the file 'coef_specs.inc' in the HELAS Source folder must contain the overall
2735         maximum of these quantities. This is not strictly safe, and the user has
2736         been appropriately warned at the output stage. """
2737
2738
2739 coef_specs_path=os.path.join(self.dir_path,'Source','DHELAS',\
2740 'coef_specs.inc')
2741 os.remove(coef_specs_path)
2742
2743 spin_to_wf_size = {1:4,2:4,3:4,4:16,5:16}
2744 overall_max_lwf_size = spin_to_wf_size[overall_max_lwf_spin]
2745 overall_max_loop_vert_coefs = q_polynomial.get_number_of_coefs_for_rank(
2746 overall_max_loop_vert_rank)
2747
2748 IncWriter=writers.FortranWriter(coef_specs_path,'w')
2749 IncWriter.writelines("""INTEGER MAXLWFSIZE
2750 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d)
2751 INTEGER VERTEXMAXCOEFS
2752 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\
2753 %{'max_lwf_size':overall_max_lwf_size,
2754 'vertex_max_coefs':overall_max_loop_vert_coefs})
2755 IncWriter.close()
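        # For illustration, with an overall maximum loop wavefunction spin of 3
        # and a maximum loop vertex rank of 2, the regenerated coef_specs.inc
        # would set MAXLWFSIZE=4 and VERTEXMAXCOEFS=15 (assuming the
        # coefficient counting of get_number_of_coefs_for_rank noted earlier).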
2756
2759 """ Sets up the replacement dictionary for the writeout of the steering
2760 file check_sa.f"""
2761 if len(squared_orders)<1:
2762 matrix_element.rep_dict['print_so_loop_results']=\
2763 "write(*,*) 'No split orders defined.'"
2764 elif len(squared_orders)==1:
2765 matrix_element.rep_dict['set_coupling_target']=''
2766 matrix_element.rep_dict['print_so_loop_results']=\
2767 "write(*,*) 'All loop contributions are of split orders (%s)'"%(
2768 ' '.join(['%s=%d'%(split_orders[i],squared_orders[0][i]) \
2769 for i in range(len(split_orders))]))
2770 else:
2771 matrix_element.rep_dict['set_coupling_target']='\n'.join([
2772 '# Here we leave the default target squared split order to -1, meaning that we'+
2773 ' aim at computing all individual contributions. You can choose otherwise.',
2774 'call %(proc_prefix)sSET_COUPLINGORDERS_TARGET(-1)'%matrix_element.rep_dict])
2775 matrix_element.rep_dict['print_so_loop_results'] = '\n'.join([
2776 '\n'.join(["write(*,*) '%dL) Loop ME for orders (%s) :'"%((j+1),(' '.join(
2777 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]))),
2778 "IF (PREC_FOUND(%d).NE.-1.0d0) THEN"%(j+1),
2779 "write(*,*) ' > accuracy = ',PREC_FOUND(%d)"%(j+1),
2780 "ELSE",
2781 "write(*,*) ' > accuracy = NA'",
2782 "ENDIF",
2783 "write(*,*) ' > finite = ',MATELEM(1,%d)"%(j+1),
2784 "write(*,*) ' > 1eps = ',MATELEM(2,%d)"%(j+1),
2785 "write(*,*) ' > 2eps = ',MATELEM(3,%d)"%(j+1)
2786 ]) for j, so in enumerate(squared_orders)])
2787 matrix_element.rep_dict['write_so_loop_results'] = '\n'.join(
2788 ["write (69,*) 'Split_Orders_Names %s'"%(' '.join(split_orders))]+
2789 ['\n'.join([
2790 "write (69,*) 'Loop_SO_Results %s'"%(' '.join(
2791 ['%d'%so_value for so_value in so])),
2792 "write (69,*) 'SO_Loop ACC ',PREC_FOUND(%d)"%(j+1),
2793 "write (69,*) 'SO_Loop FIN ',MATELEM(1,%d)"%(j+1),
2794 "write (69,*) 'SO_Loop 1EPS ',MATELEM(2,%d)"%(j+1),
2795 "write (69,*) 'SO_Loop 2EPS ',MATELEM(3,%d)"%(j+1),
2796 ]) for j, so in enumerate(squared_orders)])
2797
2798
2799 squared_born_so_orders = []
2800 for i, amp_order in enumerate(amps_orders['born_amp_orders']):
2801 for j in range(0,i+1):
2802 key = tuple([ord1 + ord2 for ord1,ord2 in \
2803 zip(amp_order[0],amps_orders['born_amp_orders'][j][0])])
2804 if not key in squared_born_so_orders:
2805 squared_born_so_orders.append(key)
2806 if len(squared_born_so_orders)<1:
2807 matrix_element.rep_dict['print_so_born_results'] = ''
2808 elif len(squared_born_so_orders)==1:
2809 matrix_element.rep_dict['print_so_born_results'] = \
2810 "write(*,*) 'All Born contributions are of split orders (%s)'"%(
2811 ' '.join(['%s=%d'%(split_orders[i],squared_born_so_orders[0][i])
2812 for i in range(len(split_orders))]))
2813 else:
2814 matrix_element.rep_dict['print_so_born_results'] = '\n'.join([
2815 "write(*,*) '%dB) Born ME for orders (%s) = ',MATELEM(0,%d)"%(j+1,' '.join(
2816 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]),j+1)
2817 for j, so in enumerate(squared_born_so_orders)])
2818 matrix_element.rep_dict['write_so_born_results'] = '\n'.join(
2819 ['\n'.join([
2820 "write (69,*) 'Born_SO_Results %s'"%(' '.join(
2821 ['%d'%so_value for so_value in so])),
2822 "write (69,*) 'SO_Born BORN ',MATELEM(0,%d)"%(j+1),
2823 ]) for j, so in enumerate(squared_born_so_orders)])
2824
2825
2826 matrix_element.rep_dict['print_so_born_results'] += \
2827 '\nwrite (*,*) "---------------------------------"'
2828 matrix_element.rep_dict['print_so_loop_results'] += \
2829 '\nwrite (*,*) "---------------------------------"'
2830
2832 """Write the file 'tir_cache_size.inc' which sets the size of the TIR
2833         cache that the user wishes to employ and the default value for it.
2834 This can have an impact on MadLoop speed when using stability checks
2835 but also impacts in a non-negligible way MadLoop's memory footprint.
2836         It is therefore important that the user can choose its size."""
2837
2838
2839
2840
2841
2842         tir_cache_size = "parameter(TIR_CACHE_SIZE=1)"
2843         writer.writelines(tir_cache_size)
2844
2845     def write_loopmatrix(self, writer, matrix_element, fortran_model, \
2846 write_auxiliary_files=True,):
2847 """Create the loop_matrix.f file."""
2848
2849 if not matrix_element.get('processes') or \
2850 not matrix_element.get('diagrams'):
2851 return 0
2852
2853
2854 writers.FortranWriter.downcase = False
2855
2856
2857
2858
2859
2860
2861
2862 squared_orders, amps_orders = matrix_element.get_split_orders_mapping()
2863
2864
2865
2866 sqso_contribs = [sqso[0] for sqso in squared_orders]
2867 split_orders = matrix_element.get('processes')[0].get('split_orders')
2868
2869
2870
2871 self.setup_check_sa_replacement_dictionary(matrix_element,
2872 split_orders,sqso_contribs,amps_orders)
2873
2874
2875
2876 overall_so_basis = list(set(
2877 [born_so[0] for born_so in amps_orders['born_amp_orders']]+
2878 [born_so[0] for born_so in amps_orders['loop_amp_orders']]))
2879
2880 order_hierarchy = matrix_element.get('processes')[0]\
2881 .get('model').get('order_hierarchy')
2882 if set(order_hierarchy.keys()).union(set(split_orders))==\
2883 set(order_hierarchy.keys()):
2884 overall_so_basis.sort(key= lambda so:
2885 sum([order_hierarchy[split_orders[i]]*order_power for \
2886 i, order_power in enumerate(so)]))
2887
2888
2889
2890 matrix_element.rep_dict['split_order_str_list'] = str(split_orders)
2891 matrix_element.rep_dict['nSO'] = len(split_orders)
2892 matrix_element.rep_dict['nSquaredSO'] = len(sqso_contribs)
2893 matrix_element.rep_dict['nAmpSO'] = len(overall_so_basis)
2894
2895 writers.FortranWriter('nsquaredSO.inc').writelines(
2896 """INTEGER NSQUAREDSO
2897 PARAMETER (NSQUAREDSO=%d)"""%matrix_element.rep_dict['nSquaredSO'])
2898
2899 replace_dict = copy.copy(matrix_element.rep_dict)
2900
2901
2902 replace_dict['ampsplitorders'] = '\n'.join(self.get_split_orders_lines(\
2903 overall_so_basis,'AMPSPLITORDERS'))
2904 replace_dict['SquaredSO'] = '\n'.join(self.get_split_orders_lines(\
2905 sqso_contribs,'SQPLITORDERS'))
2906
2907
2908 replace_dict['chosen_so_configs'] = self.set_chosen_SO_index(
2909 matrix_element.get('processes')[0],sqso_contribs)
2910
2911
2912
2913 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['loop_amp_orders'])
2914 for SO in amps_orders['loop_amp_orders']:
2915 for amp_number in SO[1]:
2916 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1
2917
2918 replace_dict['loopAmpSO'] = '\n'.join(self.format_integer_list(
2919 ampSO_list,'LOOPAMPORDERS'))
2920 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['born_amp_orders'])
2921 for SO in amps_orders['born_amp_orders']:
2922 for amp_number in SO[1]:
2923 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1
2924 replace_dict['BornAmpSO'] = '\n'.join(self.format_integer_list(
2925 ampSO_list,'BORNAMPORDERS'))
2926
2927
2928
2929 looplibs_av=['.TRUE.']
2930
2931
2932 for tir_lib in ['pjfry','iregi','golem','samurai','ninja','collier']:
2933 looplibs_av.append('.TRUE.' if tir_lib in self.all_tir and \
2934 self.tir_available_dict[tir_lib] else '.FALSE.')
2935 replace_dict['data_looplibs_av']=','.join(looplibs_av)
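        # For illustration, the resulting initializer is a plain comma-separated
        # list of seven logicals, e.g.
        #   .TRUE.,.FALSE.,.TRUE.,.FALSE.,.FALSE.,.TRUE.,.FALSE.
        # where the leading entry (presumably CutTools, always shipped with
        # MG5_aMC) is hard-coded to .TRUE. above and the rest follow the TIR
        # ordering pjfry, iregi, golem, samurai, ninja, collier.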
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945 replace_dict['hel_offset'] = 10000
2946
2947
2948
2949 den_factor_line = self.get_den_factor_line(matrix_element)
2950 replace_dict['den_factor_line'] = den_factor_line
2951
2952
2953
2954 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
2955 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
2956 matrix_element.get_beams_hel_avg_factor()
2957
2958 if write_auxiliary_files:
2959
2960 (CMNum,CMDenom) = self.get_color_matrix(matrix_element)
2961 CMWriter=open(pjoin('..','MadLoop5_resources',
2962 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w')
2963 for ColorLine in CMNum:
2964 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
2965 CMWriter.close()
2966 CMWriter=open(pjoin('..','MadLoop5_resources',
2967 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w')
2968 for ColorLine in CMDenom:
2969 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
2970 CMWriter.close()
2971
2972
2973 HelConfigs=matrix_element.get_helicity_matrix()
2974 HelConfigWriter=open(pjoin('..','MadLoop5_resources',
2975 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w')
2976 for HelConfig in HelConfigs:
2977 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n')
2978 HelConfigWriter.close()
2979
2980
2981 born_ct_helas_calls, uvct_helas_calls = \
2982 fortran_model.get_born_ct_helas_calls(matrix_element,
2983 squared_orders=squared_orders,split_orders=split_orders)
2984 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\
2985 matrix_element,group_loops=self.group_loops,
2986 squared_orders=squared_orders,split_orders=split_orders)
2987
2988 loop_CT_calls = fortran_model.get_loop_CT_calls(matrix_element,\
2989 group_loops=self.group_loops,
2990 squared_orders=squared_orders, split_orders=split_orders)
2991
2992 coef_construction = [c % matrix_element.rep_dict for c
2993 in coef_construction]
2994 loop_CT_calls = [lc % matrix_element.rep_dict for lc in loop_CT_calls]
2995
2996 file = open(os.path.join(self.template_dir,\
2997 'loop_matrix_standalone.inc')).read()
2998
2999
3000
3001 context = self.get_context(matrix_element)
3002 file=self.split_HELASCALLS(writer,replace_dict,\
3003 'helas_calls_split.inc',file,born_ct_helas_calls,\
3004 'born_ct_helas_calls','helas_calls_ampb',
3005 required_so_broadcaster = 'CT_REQ_SO_DONE',
3006 continue_label = 2000, context = context)
3007 file=self.split_HELASCALLS(writer,replace_dict,\
3008 'helas_calls_split.inc',file,uvct_helas_calls,\
3009 'uvct_helas_calls','helas_calls_uvct',
3010 required_so_broadcaster = 'UVCT_REQ_SO_DONE',
3011 continue_label = 3000, context=context)
3012 file=self.split_HELASCALLS(writer,replace_dict,\
3013 'helas_calls_split.inc',file,coef_construction,\
3014 'coef_construction','coef_construction',
3015 required_so_broadcaster = 'LOOP_REQ_SO_DONE',
3016 continue_label = 4000, context=context)
3017 file=self.split_HELASCALLS(writer,replace_dict,\
3018 'helas_calls_split.inc',file,loop_CT_calls,\
3019 'loop_CT_calls','loop_CT_calls',
3020 required_so_broadcaster = 'CTCALL_REQ_SO_DONE',
3021 continue_label = 5000, context=context)
3022
3023
3024
3025 matrix_element.rep_dict['loop_CT_calls']=replace_dict['loop_CT_calls']
3026 matrix_element.rep_dict['born_ct_helas_calls']=replace_dict['born_ct_helas_calls']
3027 matrix_element.rep_dict['uvct_helas_calls']=replace_dict['uvct_helas_calls']
3028 matrix_element.rep_dict['coef_construction']=replace_dict['coef_construction']
3029
3030 replace_dict['coef_merging']='\n'.join(coef_merging)
3031 file = file % replace_dict
3032 number_of_calls = len([call for call in loop_CT_calls if call.find('CALL LOOP') != 0])
3033 if writer:
3034
3035 writer.writelines(file,context=context)
3036 return number_of_calls
3037 else:
3038
3039 return number_of_calls, file
3040
3041
3042
3043
3046 """Class to take care of exporting a set of loop matrix elements in the
3047 Fortran format."""
3048
3049 default_opt = {'clean': False, 'complex_mass':False,
3050 'export_format':'madloop_matchbox', 'mp':True,
3051 'loop_dir':'', 'cuttools_dir':'',
3052 'fortran_compiler':'gfortran',
3053 'output_dependencies':'external',
3054 'sa_symmetry':True}
3055
3056
3057
3063
3064
3069
3070     def get_ME_identifier(self, matrix_element, group_number = None, group_elem_number = None):
3071         """ To avoid mixing notations between Borns and virtuals, we also call it MG5 here. """
3072 return 'MG5_%d_'%matrix_element.get('processes')[0].get('id')
3073
3074
3075
3076
3077
3079 """ A class to specify all the functions common to LoopInducedExporterMEGroup
3080 and LoopInducedExporterMENoGroup (but not relevant for the original
3081 Madevent exporters)"""
3082
3083 madloop_makefile_name = 'makefile_MadLoop'
3084
3085
3087 """ Initialize the process, setting the proc characteristics."""
3088 super(LoopInducedExporterME, self).__init__(*args, **opts)
3089 self.proc_characteristic['loop_induced'] = True
3090
3091     def get_context(self,*args,**opts):
3092 """ Make sure that the contextual variable MadEventOutput is set to
3093 True for this exporter"""
3094
3095 context = super(LoopInducedExporterME,self).get_context(*args,**opts)
3096 context['MadEventOutput'] = True
3097 return context
3098
3099
3100
3101
3103         """ Write an equivalent of the MG4 proc_card so that all the MadEvent4
3104         Perl scripts keep working properly for a pure MG5 run.
3105         Not needed for StandAlone, so we need to call the correct implementation here.
3106         """
3107
3108 return export_v4.ProcessExporterFortran.write_procdef_mg5(
3109 self, file_pos, modelname, process_str)
3110
3112         """ Returns the list of libraries to be compiled when compiling the
3113 SOURCE directory. It is different for loop_induced processes and
3114 also depends on the value of the 'output_dependencies' option"""
3115
3116 libraries_list = super(LoopInducedExporterME,self).\
3117 get_source_libraries_list()
3118
3119 if self.dependencies=='internal':
3120 libraries_list.append('$(LIBDIR)libcts.$(libext)')
3121 libraries_list.append('$(LIBDIR)libiregi.$(libext)')
3122
3123 return libraries_list
3124
3131
3142
3143
3144
3145
3146
3147     def finalize(self, matrix_elements, history, mg5options, flaglist):
3148 """Function to finalize v4 directory, for inheritance.
3149 """
3150
3151 self.proc_characteristic['loop_induced'] = True
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161 self.write_global_specs(matrix_elements)
3162
3164 """Write the file 'tir_cache_size.inc' which sets the size of the TIR
3165         cache that the user wishes to employ and the default value for it.
3166 This can have an impact on MadLoop speed when using stability checks
3167 but also impacts in a non-negligible way MadLoop's memory footprint.
3168         It is therefore important that the user can choose its size."""
3169
3170
3171
3172
3173
3174         tir_cache_size = "parameter(TIR_CACHE_SIZE=2)"
3175         writer.writelines(tir_cache_size)
3176
3177     def write_matrix_element_v4(self, writer, matrix_element, fortran_model,
3178 proc_id = None, config_map = [], subproc_number = None):
3179         """ Write the wrapper to call the ML5 subroutine in the library."""
3180
3181
3182 if not matrix_element.get('processes') or \
3183 not matrix_element.get('diagrams'):
3184 return 0
3185
3186 if not isinstance(writer, writers.FortranWriter):
3187 raise writers.FortranWriter.FortranWriterError(\
3188 "writer not FortranWriter")
3189
3190 replace_dict = copy.copy(matrix_element.rep_dict)
3191
3192
3193 info_lines = self.get_mg5_info_lines()
3194 replace_dict['info_lines'] = info_lines
3195
3196
3197 process_lines = self.get_process_info_lines(matrix_element)
3198 replace_dict['process_lines'] = process_lines
3199
3200
3201
3202
3203
3204 if proc_id is None:
3205 replace_dict['proc_id'] = ''
3206 else:
3207 replace_dict['proc_id'] = proc_id
3208
3209
3210 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
3211 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
3212 matrix_element.get_beams_hel_avg_factor()
3213
3214
3215 helicity_lines = self.get_helicity_lines(matrix_element)
3216 replace_dict['helicity_lines'] = helicity_lines
3217
3218
3219
3220 ndiags = len(matrix_element.get('diagrams'))
3221 replace_dict['ndiags'] = ndiags
3222
3223
3224 replace_dict['define_iconfigs_lines'] = \
3225 """INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG
3226 COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG"""
3227
3228 if proc_id:
3229
3230
3231 replace_dict['define_iconfigs_lines'] += \
3232 """\nINTEGER SUBDIAG(MAXSPROC),IB(2)
3233 COMMON/TO_SUB_DIAG/SUBDIAG,IB"""
3234
3235 replace_dict['configID_in_matrix'] = "SUBDIAG(%s)"%proc_id
3236 else:
3237
3238
3239 replace_dict['configID_in_matrix'] = "MAPCONFIG(ICONFIG)"
3240
3241
3242 replace_dict['ml_prefix'] = \
3243 self.get_ME_identifier(matrix_element, subproc_number, proc_id)
3244
3245
3246 ncolor = max(1, len(matrix_element.get('color_basis')))
3247 replace_dict['ncolor'] = ncolor
3248
3249 n_tot_diags = len(matrix_element.get_loop_diagrams())
3250 replace_dict['n_tot_diags'] = n_tot_diags
3251
3252 file = open(pjoin(_file_path, \
3253 'iolibs/template_files/%s' % self.matrix_file)).read()
3254 file = file % replace_dict
3255
3256
3257 writer.writelines(file)
3258
3259 return 0, ncolor
3260
3262 """Make sure the function is implemented in the daughters"""
3263
3264         raise NotImplementedError('The function get_amp2_lines must be implemented'+\
3265                              ' in the daughters of LoopInducedExporterME')
3266
3267
3268
3269
3272 """Class to take care of exporting a set of grouped loop induced matrix
3273 elements"""
3274
3275 matrix_file = "matrix_loop_induced_madevent_group.inc"
3276
3282
3289
3301
3315
3318 """Generate the Pn directory for a subprocess group in MadEvent,
3319 including the necessary matrix_N.f files, configs.inc and various
3320 other helper files"""
3321
3322
3323 calls = 0
3324 matrix_elements = subproc_group.get('matrix_elements')
3325 for ime, matrix_element in enumerate(matrix_elements):
3326 self.unique_id +=1
3327 calls += self.generate_loop_subprocess(matrix_element,fortran_model,
3328 group_number = group_number, proc_id = str(ime+1),
3329
3330 config_map = subproc_group.get('diagram_maps')[ime],
3331 unique_id=self.unique_id)
3332
3333
3334 export_v4.ProcessExporterFortranMEGroup.generate_subprocess_directory(
3335 self, subproc_group,fortran_model,group_number)
3336
3337 return calls
3338
3340 """Return the various replacement dictionary inputs necessary for the
3341 multichanneling amp2 definition for the loop-induced MadEvent output.
3342 """
3343
3344 if not config_map:
3345 raise MadGraph5Error('A multi-channeling configuration map is '+\
3346 ' necessary for the MadEvent Loop-induced output with grouping.')
3347
3348 nexternal, ninitial = matrix_element.get_nexternal_ninitial()
3349
3350 ret_lines = []
3351
3352
3353
3354 if isinstance(matrix_element, loop_helas_objects.LoopHelasMatrixElement):
3355 diagrams = matrix_element.get_loop_diagrams()
3356 else:
3357 diagrams = matrix_element.get('diagrams')
3358
3359
3360
3361
3362
3363 config_index_map = {}
3364
3365
3366 loop_amp_ID_to_config = {}
3367
3368
3369 config_to_diag_dict = {}
3370 for idiag, diag in enumerate(diagrams):
3371 try:
3372 config_to_diag_dict[config_map[idiag]].append(idiag)
3373 except KeyError:
3374 config_to_diag_dict[config_map[idiag]] = [idiag]
3375
3376 for config in sorted(config_to_diag_dict.keys()):
3377 config_index_map[config] = (config_to_diag_dict[config][0] + 1)
3378
3379
3380
3381 CT_amp_numbers = [a.get('number') for a in \
3382 sum([diagrams[idiag].get_ct_amplitudes() for \
3383 idiag in config_to_diag_dict[config]], [])]
3384
3385 for CT_amp_number in CT_amp_numbers:
3386 loop_amp_ID_to_config[CT_amp_number] = config
3387
3388
3389 loop_amp_numbers = [a.get('amplitudes')[0].get('number')
3390 for a in sum([diagrams[idiag].get_loop_amplitudes() for \
3391 idiag in config_to_diag_dict[config]], [])]
3392
3393 for loop_amp_number in loop_amp_numbers:
3394 loop_amp_ID_to_config[loop_amp_number] = config
3395
3396
3397
3398
3399
3400
3401 n_configs = max(config_index_map.keys())
3402 replace_dict['nmultichannel_configs'] = n_configs
3403
3404
3405
3406 conf_list = [(config_index_map[i] if i in config_index_map else 0) \
3407 for i in range(1,n_configs+1)]
3408
3409
3410 replace_dict['nmultichannels'] = len([_ for _ in conf_list if _!=0])
3411
3412
3413 res_list = []
3414 chunk_size = 6
3415 for k in range(0, len(conf_list), chunk_size):
3416 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \
3417 (k + 1, min(k + chunk_size, len(conf_list)),
3418 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]])))
3419
3420 replace_dict['config_index_map_definition'] = '\n'.join(res_list)
3421
3422 res_list = []
3423 n_loop_amps = max(loop_amp_ID_to_config.keys())
3424 amp_list = [loop_amp_ID_to_config[i] for i in \
3425 sorted(loop_amp_ID_to_config.keys()) if i!=0]
3426 chunk_size = 6
3427 for k in range(0, len(amp_list), chunk_size):
3428 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \
3429 (k + 1, min(k + chunk_size, len(amp_list)),
3430 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]])))
3431
3432 replace_dict['config_map_definition'] = '\n'.join(res_list)
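        # For illustration, the generated lines are Fortran DATA statements of
        # the form (mapping values are an example only):
        #   DATA (CONFIG_MAP(i),i=     1,     6) /     1,     1,     2,     0,     3,     3/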
3433
3434 return
3435
3436
3437
3438
3441 """Class to take care of exporting a set of individual loop induced matrix
3442 elements"""
3443
3444 matrix_file = "matrix_loop_induced_madevent.inc"
3445
3451
3458
3470
3483
3485 """Generate the Pn directory for a subprocess group in MadEvent,
3486 including the necessary matrix_N.f files, configs.inc and various
3487 other helper files"""
3488
3489 self.unique_id += 1
3490
3491 calls = self.generate_loop_subprocess(matrix_element,fortran_model,
3492 group_number = me_number,
3493 unique_id=self.unique_id)
3494
3495
3496
3497 calls += export_v4.ProcessExporterFortranME.generate_subprocess_directory(
3498 self, matrix_element, fortran_model, me_number)
3499 return calls
3500
3502 """Return the amp2(i) = sum(amp for diag(i))^2 lines"""
3503
3504 if config_map:
3505 raise MadGraph5Error('A configuration map should not be specified'+\
3506 ' for the Loop induced exporter without grouping.')
3507
3508 nexternal, ninitial = matrix_element.get_nexternal_ninitial()
3509
3510 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \
3511 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]]
3512 minvert = min(vert_list) if vert_list!=[] else 0
3513
3514
3515
3516
3517
3518 config_index_map = {}
3519
3520
3521 loop_amp_ID_to_config = {}
3522
3523 n_configs = 0
3524 for idiag, diag in enumerate(matrix_element.get('diagrams')):
3525
3526 use_for_multichanneling = True
3527 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert:
3528 use_for_multichanneling = False
3529 curr_config = 0
3530 else:
3531 n_configs += 1
3532 curr_config = n_configs
3533
3534 if not use_for_multichanneling:
3535 if 0 not in config_index_map:
3536 config_index_map[0] = idiag + 1
3537 else:
3538 config_index_map[curr_config] = idiag + 1
3539
3540 CT_amps = [ a.get('number') for a in diag.get_ct_amplitudes()]
3541 for CT_amp in CT_amps:
3542 loop_amp_ID_to_config[CT_amp] = curr_config
3543
3544 Loop_amps = [a.get('amplitudes')[0].get('number')
3545 for a in diag.get_loop_amplitudes()]
3546 for Loop_amp in Loop_amps:
3547 loop_amp_ID_to_config[Loop_amp] = curr_config
3548
3549
3550 n_configs = len([k for k in config_index_map.keys() if k!=0])
3551 replace_dict['nmultichannel_configs'] = n_configs
3552
3553
3554
3555 replace_dict['nmultichannels'] = n_configs
3556
3557 res_list = []
3558 conf_list = [config_index_map[i] for i in sorted(config_index_map.keys())
3559 if i!=0]
3560 chunk_size = 6
3561 for k in range(0, len(conf_list), chunk_size):
3562 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \
3563 (k + 1, min(k + chunk_size, len(conf_list)),
3564 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]])))
3565
3566 replace_dict['config_index_map_definition'] = '\n'.join(res_list)
3567
3568 res_list = []
3569 n_loop_amps = max(loop_amp_ID_to_config.keys())
3570 amp_list = [loop_amp_ID_to_config[i] for i in \
3571 sorted(loop_amp_ID_to_config.keys()) if i!=0]
3572 chunk_size = 6
3573 for k in range(0, len(amp_list), chunk_size):
3574 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \
3575 (k + 1, min(k + chunk_size, len(amp_list)),
3576 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]])))
3577
3578 replace_dict['config_map_definition'] = '\n'.join(res_list)
3579