template_crab_grid_copyToEOS_JSON.cfg
[CRAB]
jobtype = cmssw
scheduler = remoteGlidein
### NOTE: by just setting the name of a server (pi, lnl, etc.),
### CRAB will submit the jobs through that server...
use_server = 0
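### (for illustration only, not part of the original template: setting use_server = 1 would enable submission through the named server)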
[CMSSW]
### The data you want to access (to be found on DBS)
#datasetpath=/BeamHaloExpress/BeamCommissioning09-Express-v1/FEVT
#datasetpath=/MinBias/Summer09-STARTUP3X_V8D_900GeV-v1/GEN-SIM-RECO
#datasetpath=None
###########################
datasetpath = THISDATASET
###########################
#dbs_url=http://cmsdbsprod.cern.ch/cms_dbs_caf_analysis_01/servlet/DBSServlet
### A single processed dataset can contain multiple runs (in the case of real data!).
### The following selection can narrow down the runs used within a single processed dataset.
### The selection can be a comma-separated list of run numbers and run-number ranges: 1,2,3-4
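### for example (illustrative only; this commented line is not part of the original template):
#runselection = 1,2,3-4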
### The ParameterSet you want to use
###########################
pset = THISCMSSWCONFIGFILE
###########################
### Splitting parameters
#total_number_of_events=20
#events_per_job = 1000
#number_of_jobs = 2
###########################
number_of_jobs = THISNJOBS
###########################
### The output files (comma-separated list)
###########################
output_file = THISOUTPUTFILE
#output_file = QCDDiJetPt120to170__Summer08_IDEAL_V9_v1__GEN-SIM-RECO.root
###########################
############ JSON PART ##############
lumi_mask = THISJSONFILE
total_number_of_lumis = THISNLUMIS
#####################################
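### THISJSONFILE should point to a luminosity-mask JSON file, which maps run numbers to
### luminosity-section ranges, e.g. {"163332": [[1, 173], [180, 200]]} (illustrative values only)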
[USER]
### OUTPUT files Management
## output back into UI
return_data = 0
### To use a specific name for the UI directory where CRAB will create the jobs to submit (with full path).
### The default directory will be "crab_0_<date>_<time>".
#ui_working_dir = /full/path/Name_of_Directory
###########################
ui_working_dir = THISUIWORKINGDIR
###########################
###########################
outputdir = THISOUTPUTDIR
###########################
### To specify the UI directory where the CMS executable output is stored.
### A FULL path is mandatory. By default <ui_working_dir>/res will be used.
#outputdir= /full/path/yourOutDir
### To specify the UI directory where the stderr, stdout and .BrokerInfo of submitted jobs are stored.
### A FULL path is mandatory. By default <ui_working_dir>/res will be used.
#logdir= /full/path/yourLogDir
### OUTPUT files INTO A SE
copy_data = 1
### if you want to copy data to an "official CMS site"
### you have to specify the official site name, e.g.
#storage_element = T2_IT_Bari
### the user_remote_dir will be created under the SE mountpoint
### in the case of publication this directory is not considered
#user_remote_dir = name_directory_you_want
### if you want to copy your data at CAF
#storage_element = T2_CH_CAF
### the user_remote_dir will be created under the SE mountpoint
### in the case of publication this directory is not considered
#user_remote_dir = name_directory_you_want
### JSON bug fix (see https://twiki.cern.ch/twiki/bin/view/CMS/LumiSelJson)
check_user_remote_dir = 0
### if you want to copy your data to your area in Castor at CERN,
### or to a "non-official CMS site", you have to specify the complete name of the SE
#storage_element=srm-cms.cern.ch
### this directory is the mountpoint of the SE
#storage_path=/srm/managerv2?SFN=/castor/cern.ch
### directory or tree of directories under the mountpoint
#user_remote_dir=/user/a/apresyan
#storage_element = srm-cms.cern.ch
#storage_path = /srm/managerv2?SFN=/castor/cern.ch
storage_element = srm-eoscms.cern.ch
storage_path = /srm/v2/server?SFN=/eos/cms/store/
##############################
user_remote_dir=THISUSERREMOTEDIR
##############################
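### with the EOS settings above, output is expected to land under
### /eos/cms/store/<THISUSERREMOTEDIR>/ on srm-eoscms.cern.ch
### (illustrative sketch of the path composition, not part of the original template)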
### To publish the produced output in a local instance of DBS, set publish_data = 1
publish_data=0
### Specify the dataset name. The full path will be <primarydataset>/<publish_data_name>/USER
publish_data_name = name_you_prefer
### Specify the URL of the DBS instance where CRAB has to publish the output files
#dbs_url_for_publication = https://cmsdbsprod.cern.ch:8443/cms_dbs_caf_analysis_01_writer/servlet/DBSServlet
### email notifications
thresholdLevel = 100
#eMail = [email protected]
[GRID]
## RB/WMS management:
rb = CERN
#proxy_server = myproxy.cern.ch
## Black and White Lists management:
## By Storage
##se_black_list = T0,T1
#se_white_list = grid-srm.physik.rwth-aachen.de
## By ComputingElement
#ce_black_list = gridce2.pi.infn.it
#ce_black_list = srm.ciemat.es,srm-3.t2.ucsd.edu,hephyse.oeaw.ac.at,maite.iihe.ac.be,t2-srm-02.lnl.infn.it,sbgse1.in2p3.fr,cmssrm.hep.wisc.edu,cmsdcache.pi.infn.it,srm.minnesota.edu,storm.ifca.es
#ce_white_list =
[CONDORG]
# Set this to condor to override the batchsystem defined in gridcat.
#batchsystem = condor
# Specify additional condor_g requirements
# use this requirement to run on CMS-dedicated hardware
# globus_rsl = (condor_submit=(requirements 'ClusterName == \"CMS\" && (Arch == \"INTEL\" || Arch == \"X86_64\")'))
# use this requirement to run on the new hardware
#globus_rsl = (condor_submit=(requirements 'regexp(\"cms-*\",Machine)'))