Example #1
def main_simulate(in_stream, out_stream):
    """Simulate every scenario read from `in_stream` and write one
       result line per scenario to `out_stream`."""
    limits = Limits(open("rts.lim"))
    loadflow = Loadflow(open("rts.lf"), limits)

    for count, scenario in stream_scenario_generator(in_stream):
        result, result_reason = loadflow.simulate(scenario)
        scenario.result = result
        scenario.result_reason = result_reason
        out_stream.write(str(count) + ", " + str(scenario) + "\n")
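A minimal way to drive this function, assuming the comma-separated scenario format that the batch strings in Example #3 below construct (the scenario line here is illustrative, not taken from a real run):

from io import StringIO
import sys

# Hypothetical one-scenario batch: count, name, result, reason, load-scale
batch = "1, base, None, , 1.0\n"

# One result line per scenario is written to the output stream
main_simulate(StringIO(batch), sys.stdout)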
Example #2
    @classmethod
    def from_dict(cls, robot, subgait_dict, gait_name, subgait_name, version,
                  *args):
        """List parameters from the yaml file in organized lists.

        :param robot:
            The robot corresponding to the given sub-gait file
        :param subgait_dict:
            The dictionary extracted from the yaml file
        :param gait_name:
            The name of the parent gait
        :param subgait_name:
            The name of the child (sub)gait
        :param version:
            The version of the yaml file

        :returns:
            A populated Subgait object
        """
        if robot is None:
            rospy.logerr('Cannot create gait without a loaded robot.')
            return None

        joint_trajectory = subgait_dict['trajectory']
        duration = rospy.Duration(subgait_dict['duration']['secs'],
                                  subgait_dict['duration']['nsecs']).to_sec()

        joint_list = []
        for joint_name in joint_trajectory['joint_names']:
            urdf_joint = cls._get_joint_from_urdf(robot, joint_name)
            if urdf_joint is None:
                rospy.logwarn('Not all joints in gait are in robot.')
                continue

            limits = Limits(urdf_joint.safety_controller.soft_lower_limit,
                            urdf_joint.safety_controller.soft_upper_limit,
                            urdf_joint.limit.velocity)

            joint_list.append(
                cls.joint_class.from_dict(subgait_dict, joint_name, limits,
                                          duration, *args))

        subgait_type = subgait_dict.get('gait_type') or ''
        subgait_description = subgait_dict.get('description') or ''

        return cls(joint_list, duration, subgait_type, gait_name, subgait_name,
                   version, subgait_description)
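The three-argument Limits(...) call above suggests a plain value container for joint limits; a minimal sketch under that assumption (field names are inferred from the call site, units are guesses):

class Limits(object):
    """Hypothetical container for the joint limits used above."""

    def __init__(self, lower, upper, velocity):
        self.lower = lower        # soft lower position limit (rad, assumed)
        self.upper = upper        # soft upper position limit (rad, assumed)
        self.velocity = velocity  # maximum joint velocity (rad/s, assumed)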
Example #3
def main_test(out_stream):
    """Print the results and the intermediate files for a number of
       interesting scenarios, so we can check by hand whether the
       intermediate-file generator and the simulator are doing the
       correct thing.
    """

    batch_string = ""
    batch_string += "1, base, None, , 1.0" + windstr(1) + "\n"             # base - as normal
    batch_string += "1, half, None, , 0.5" + windstr(1) + "\n"             # half load power
    batch_string += "1, tenth, None, , 0.1" + windstr(1) + "\n"            # tenth load power
    batch_string += "1, island, None, , 1.0" + windstr(1) + ", B11\n"      # island
    batch_string += "1, slack, None, , 1.0" + windstr(1) + ", G12\n"       # removed 1 slack bus
    batch_string += "1, slack-all, None, , 1.0" + windstr(1) + ", G12, G13, G14\n"  # removed all slack busses
    batch_string += "1, line, None, , 1.0" + windstr(1) + ", A2\n"         # remove 1 line
    batch_string += "1, gen, None, , 1.0" + windstr(1) + ", G24\n"         # remove 1 generator
    batch_string += "1, bus, None, , 1.0" + windstr(1) + ", 104\n"         # remove 1 bus without generators
    batch_string += "1, bus-gen, None, , 1.0" + windstr(1) + ", 101\n"     # remove 1 bus with generators attached
    batch_string += "1, bus-slack, None, , 1.0" + windstr(1) + ", 113\n"   # remove slack bus and all slack generators
    batch_string += "1, bus-island, None, , 1.0" + windstr(1) + ", 208\n"  # remove bus that causes island
    batch_string += "1, high-load, None, , 1.10" + windstr(1) + "\n"       # load power high
    batch_string += "1, over-max, None, , 1.15" + windstr(1) + "\n"        # load power above max gen power

    if windlevel.num_wind > 0:
        batch_string += "1, wind-50, None, , 1.0" + windstr(0.5) + "\n"             # base - wind @ 50%
        batch_string += "1, wind-10, None, , 1.0" + windstr(0.1) + "\n"             # base - wind @ 10%
        batch_string += "1, wind-200, None, , 1.0" + windstr(2.0) + "\n"             # base - wind @ 200%

    in_stream = StringIO(batch_string)

    limits = Limits(open("rts.lim"))
    loadflow = Loadflow(open("rts.lf"), limits)

    for count, scenario in stream_scenario_generator(in_stream):
        intermediate_file = open(scenario.scenario_type + ".csv", "w")

        try:
            loadflow.lfgenerator(intermediate_file, scenario)
            result, result_reason = loadflow.simulate(scenario)
        except Exception as e:
            # strip commas from the message so it stays a single CSV field
            result, result_reason = (False, str(e).replace(',', ''))

        intermediate_file.close()
        scenario.result = result
        scenario.result_reason = result_reason
        out_stream.write(str(count) + ", " + str(scenario) + "\n")
Example #4
from stateful_queries import save_query, last_query
# pylint: disable=wrong-import-position,wrong-import-order

if not os.path.exists(os.path.dirname(LOG_FILE)):
    os.makedirs(os.path.dirname(LOG_FILE))
logging.basicConfig(filename=LOG_FILE,
                    level=logging.DEBUG,
                    format='%(asctime)s %(message)s')

app = Flask(__name__)  # pylint: disable=invalid-name
app.jinja_loader = jinja2.ChoiceLoader([
    app.jinja_loader,
    jinja2.FileSystemLoader(TEMPLATES),
])

LIMITS = Limits()


def is_html_needed(user_agent):
    """
    Based on `user_agent`, return whether the client needs HTML
    rather than plain-text/ANSI output
    """
    plaintext_clients = [
        'curl', 'wget', 'fetch', 'httpie', 'lwp-request', 'openbsd ftp',
        'python-requests'
    ]
    return all(x not in user_agent for x in plaintext_clients)


def is_result_a_script(query):
    """Return True if `query` is answered with a script rather than a page."""
    return query in [':cht.sh']
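A quick sanity check of the matching logic, assuming the caller passes a lowercased User-Agent string (the substrings above are all lowercase):

assert is_html_needed('mozilla/5.0 (x11; linux x86_64)') is True
assert is_html_needed('curl/7.68.0') is False
assert is_html_needed('python-requests/2.31.0') is False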
Example #5
    def __init__(self, telecommand):
        super(RawI2CData, self).__init__(telecommand, Limits().max_frame_payload_size())
Example #6
    def get_response_bytes_count(self):
        return 5 * Limits().max_frame_payload_size()
Example #7
    def get_response_bytes_count(self):
        payload = self.get_payload()
        path_length = payload[1]
        seqs = payload[(path_length + 3):]  # sequence IDs follow the path bytes
        seqs_count = len(seqs) // 4         # each sequence ID is 4 bytes
        return seqs_count * Limits().max_frame_payload_size()
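A worked example of the size computation with an invented payload (a 2-byte path followed by two 4-byte sequence IDs; the header byte meanings are hypothetical):

payload = [0x10, 2, 0xAA, 0xBB, 0x00,   # opcode, path_length, path, pad (assumed)
           0, 0, 0, 1, 0, 0, 0, 2]      # two 4-byte sequence IDs
path_length = payload[1]                # 2
seqs = payload[(path_length + 3):]      # the 8 trailing bytes
assert len(seqs) // 4 == 2              # so the response spans 2 max-size frames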
Example #8
                    NOT_FOUND_LOCATION, \
                    MALFORMED_RESPONSE_HTML_PAGE, \
                    PLAIN_TEXT_AGENTS, PLAIN_TEXT_PAGES, \
                    MY_EXTERNAL_IP, QUERY_LIMITS
from location import is_location_blocked, location_processing
from limits import Limits
from wttr import get_wetter, get_moon
from wttr_line import wttr_line

if not os.path.exists(os.path.dirname(LOG_FILE)):
    os.makedirs(os.path.dirname(LOG_FILE))
logging.basicConfig(filename=LOG_FILE,
                    level=logging.DEBUG,
                    format='%(asctime)s %(message)s')

LIMITS = Limits(whitelist=[MY_EXTERNAL_IP], limits=QUERY_LIMITS)


def show_text_file(name, lang):
    """
    show static file `name` for `lang`
    """
    text = ""
    if name == ":help":
        text = open(get_help_file(lang), 'r').read()
        text = text.replace('FULL_TRANSLATION', ' '.join(FULL_TRANSLATION))
        text = text.replace('PARTIAL_TRANSLATION',
                            ' '.join(PARTIAL_TRANSLATION))
    elif name == ":bash.function":
        text = open(BASH_FUNCTION_FILE, 'r').read()
    elif name == ":translation":
Example #9
                    NOT_FOUND_LOCATION, \
                    MALFORMED_RESPONSE_HTML_PAGE, \
                    PLAIN_TEXT_AGENTS, PLAIN_TEXT_PAGES, \
                    MY_EXTERNAL_IP
from location import is_location_blocked, location_processing
from limits import Limits
from wttr import get_wetter, get_moon
from wttr_line import wttr_line

if not os.path.exists(os.path.dirname(LOG_FILE)):
    os.makedirs(os.path.dirname(LOG_FILE))
logging.basicConfig(filename=LOG_FILE,
                    level=logging.DEBUG,
                    format='%(asctime)s %(message)s')

LIMITS = Limits(whitelist=[MY_EXTERNAL_IP], limits=(30, 60, 100))


def show_text_file(name, lang):
    """
    show static file `name` for `lang`
    """
    text = ""
    if name == ":help":
        text = open(get_help_file(lang), 'r').read()
        text = text.replace('FULL_TRANSLATION', ' '.join(FULL_TRANSLATION))
        text = text.replace('PARTIAL_TRANSLATION',
                            ' '.join(PARTIAL_TRANSLATION))
    elif name == ":bash.function":
        text = open(BASH_FUNCTION_FILE, 'r').read()
    elif name == ":translation":
Example #10
import numpy as np
import pandas as pd
import json

from limits import Limits
from predict_loan import PredictLoan
from flask import Flask, request, jsonify
from proposal import Proposal

app = Flask(__name__)

BASE_RATE = 5.05

limits = Limits()
predict = PredictLoan()

print('Service Started...')


@app.route("/nextProposal", methods=['GET', 'POST'])
def nextProposal():
    request_content = request.get_json()

    print("Request: \n", json.dumps(request_content, indent=2))

    customer_grade = int(request_content['grade'])

    fixedLimits = limits.calculateFixedLimits(customer_grade,
                                              request_content['annual_inc'])

    print("Limits:")
Example #11
class Main:
    def __init__(self):
        self.IS_ALIVE = True
        self.socket = None

        self.db = DB() 
        self.tc_manager = Testcases(self.db)
        self.token_manager = Token(self.db)
        self.limits = Limits(self.db.getConf()[3], self.db.getConf()[4])

        self.queue = []
        self.workers = []

        self.allowed_languages = ['c++11', 'c++14', 'c++17']

    def loadSubID(self):
        ''' Return the next submission ID and advance the stored counter '''
        sub_id = self.db.getConf()[0]
        self.db.saveConf('sub_id', sub_id + 1)
        return sub_id

    def startLoop(self):
        Thread(target=self.server_loop_thread).start()
        Thread(target=self.queue_workers).start()

    def shutdown(self):
        self.IS_ALIVE = False
        self.socket.close()

        os.kill(os.getpid(), 9)  # force-kill the whole process (SIGKILL)

    def create_server(self):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.socket.bind( (SERVER['ip'], SERVER['port']) )
        except Exception as e:
            log.write("Failed to start socket: " + str(e), 'warn')
            self.shutdown()

        self.socket.settimeout(1)
        self.socket.listen(10)
        return 0

    def server_loop_thread(self):
        if self.create_server():
            return -1

        log.write("Socket started, listening for requests ...")
        while (self.IS_ALIVE):
            time.sleep(0.5)

            # Ping all workers roughly every 10 seconds
            if (int(time.time()) % 10 <= 1):
                self.ping_workers()

            try:
                conn, addr = self.socket.accept()
                conn.settimeout(1)
                data = socket_protocol_recieve(conn)

                if data in (SOCKET_RECV_TIMEOUT, SOCKET_RECV_EMPTY, SOCKET_RECV_FAILED):
                    conn.close()
                    log.write("Invalid connect packet, connection terminated")
                    continue
            except Exception:
                # accept() timed out or receiving failed; keep looping
                continue

            log.write("Recieving worker from {}".format(addr))

            worker = Worker(conn, addr )
            self.workers.append(worker)

    def queue_workers(self):
        ''' Processing worker queue '''
        while self.IS_ALIVE:
            time.sleep(0.5)
            queue = self.queue[:]
            self.queue = []
            overhead = []

            for task in queue:
                # Throttle a little to limit network overhead
                time.sleep(0.1)

                # Determine the worker with the lowest workload
                best, worker = 999, None
                for wrk in self.workers[:]:
                    res = wrk.communicate([json.dumps({
                        'request': 'workload',
                    })], True)

                    if res in [SOCKET_RECV_EMPTY, SOCKET_RECV_FAILED, SOCKET_RECV_TIMEOUT]:
                        log.write("Bad worker.")
                        self.remove_worker(wrk)
                        continue

                    try:
                        res = json.loads(res[0])['workload']
                    except Exception:
                        log.write("Bad worker response.")
                        continue

                    if (res < best):
                        best = res
                        worker = wrk
                        
                # Check if any worker responded
                if not worker:
                    overhead.append(task)
                    continue

                # Limit workload to 3
                if best > 3:
                    overhead.append(task)
                    continue

                # Send queue request to worker
                if task['request'] == 'submit':
                    print('sent:', task)
                    worker.communicate([json.dumps(task)])

            # Requeue failed tasks at the front; new tasks may have been
            # added to self.queue meanwhile, so keep those behind them
            self.queue = overhead + self.queue

    def get_status(self, req):
        for worker in self.workers[:]:
            response = worker.communicate([json.dumps(req)], True)

            if response in [SOCKET_RECV_EMPTY, SOCKET_RECV_FAILED, SOCKET_RECV_TIMEOUT]:
                log.write("Bad worker.")
                self.remove_worker(worker)
                continue
            response = json.loads(response[0])
            if 'error' in response and 'running' not in response:
                continue
            response.update({'success': True})
            return response
        return {'success': False, 'error': 'Invalid submission ID'}

    def valid_submission(self, sub):
        ''' Validate the submit request, return a tuple (status, formatted) '''

        # Required fields
        req = ['code', 'language']
        for field in req:
            if field not in sub:
                return (0, 'Field "{}" is required'.format(field))
        
        # Check unknown keys
        allowed_keys = ['submission_id', 'language', 'code', 'time_limit', 'memory_limit', 'testcases', 'testcases_id', 'grader']
        for key in sub:
            if key not in allowed_keys:
                return (0, "Unrecognised field '{}'".format(key) )

        # Check for passive-submissions
        passive_submissions = self.db.getConf()[2]
        if passive_submissions:
            only_keys = ['code', 'language', 'testcases_id', 'submission_id']
            for key in sub:
                if key not in only_keys:
                    return (0, "Passive submission doesn't allow: '{}'".format(key) )
            if 'testcases_id' not in sub:
                return (0, "Passive submission requires testcases_id")


        # Map common language aliases to an accepted variant
        lang_repl = {'c++': 'c++11'}
        for lang in lang_repl:
            if sub['language'] == lang:
                sub['language'] = lang_repl[lang]

        # Check for allowed languages
        languages = self.allowed_languages
        if sub['language'] not in languages:
            return (0, 'Language {} is not supported'.format( sub['language'] ))

        # Apply defaults for missing fields
        default = {
            'time_limit': 1000,
            'memory_limit': 256,
            'testcases': [],
            'testcases_id': -1,
            'grader': '',
        }
        for key in default:
            if key not in sub or not sub[key]:
                sub[key] = default[key]

        # Coerce the resource limits to integers
        try:
            sub['time_limit'] = int(sub['time_limit'])
            sub['memory_limit'] = int(sub['memory_limit'])
        except (TypeError, ValueError):
            return (0, 'Invalid time/memory resource value')

        # Normalize to a list of tuples, or load testcases from file
        sub['testcases'] = self.tc_manager.validTestcases(sub['testcases'])
        if not sub['testcases']:
            sub['testcases'] = self.tc_manager.loadTestcases( sub['testcases_id'] )

            if 'error' in sub['testcases']:
                return (0, 'Testcases should be a list of tuples, or provide a valid testcases ID')
            else:
                sub['testcases'] = sub['testcases']['testcases']

        # Limit resources
        if sub['time_limit'] > 15*1000:
            return (0, "Time limit can't be greater than 15000ms")
        elif sub['time_limit'] < 100:
            return (0, "Time limit can't be lower than 100ms")
        elif sub['memory_limit'] > 256:
            return (0, "Memory limit can't be greater than 256MB")
        elif sub['memory_limit'] < 4:
            return (0, "Memory limit can't be lower than 4MB")

        return (1, sub)

    def valid_status(self, stat):
        ''' Validate the status request, return a tuple (status, formatted) '''
        
        # Required fields
        req = ['submission_id']
        for field in req:
            if field not in stat:
                return (0, 'Field "{}" is required'.format(field))

        # Check unknown keys
        allowed_keys = ['submission_id', 'output']
        for key in stat:
            if key not in allowed_keys:
                return (0, 'Unrecognised field "{}"'.format(key) )
        
        # Parse the output flag if provided (avoid eval on client input)
        if 'output' in stat:
            value = str(stat['output']).lower()
            if value in ('true', '1'):
                stat['output'] = True
            elif value in ('false', '0'):
                stat['output'] = False
            else:
                return (0, 'Output field has unknown value "{}"'.format(stat['output']))

        # Default `output` to False if it was not included
        if 'output' not in stat:
            stat['output'] = False
        
        return (1, stat)


    def add_queue(self, task):
        if (task['request'] == 'submit'):
            valid = self.valid_submission( task['data'] )
            if valid[0]:
                if (len(self.queue) > 20):
                    return {'success': False, 'error': "Queue is full. Try again in a few moments"}

                task['data'] = valid[1]
                task['data']['submission_id'] = self.loadSubID()
                self.queue.append(task)
                return {'success': True, 'submission_id': task['data']['submission_id']}
            else:
                return {'success': False, 'error': valid[1]}
        
        if (task['request'] == 'status'):
            # Check if status is in valid format
            valid = self.valid_status(task['data'])
            if (not valid[0]):
                return {'success': False, 'error': valid[1] }
            task['data'] = valid[1]

            return self.get_status(task)

        if (task['request'] == 'testcases'):
            if 'testcases' not in task['data']:
                return {'success': False, 'error': "testcases field is required"}
            else:
                res = self.tc_manager.saveTestcases(task['data']['testcases'])
            if 'error' in res:
                res['success'] = False
            else:
                res['success'] = True
            return res

        if (task['request'] == 'create-token'):
            req = self.token_manager.validateRequest(task['data'])
            if not req[0]:
                return {'success': False, 'error': req[1] }
            else:
                token = self.token_manager.createToken(req[1]['owner'], req[1]['expiration'], req[1]['access_level'] )
                return {'success': True, 'token': token}

        if (task['request'] == 'info-token'):
            req = self.token_manager.tokenInformation(task['data']['token'])
            if not req[0]:
                return {'success': False, 'error': req[1] }
            else:
                return {'success': True, 'token': task['data']['token'],
                        'owner': req[1]['owner'],
                        'access_level': req[1]['access_level'],
                        'expiration': req[1]['expiration'] - time.time()}

        if (task['request'] == 'delete-token'):
            req = self.token_manager.deleteToken(task['data']['token'], task['data']['owner'])
            if not req[0]:
                return {'success': False, 'error': req[1] }
            else:
                return {'success': True, 'message': req[1]}

        if (task['request'] == 'settings'):
            try:
                changed = []
                if 'token_access' in task['data']:
                    self.db.saveConf('token_access', task['data']['token_access'])
                    changed.append('token_access')
                if 'passive_submissions' in task['data']:
                    self.db.saveConf('passive_submissions', task['data']['passive_submissions'])
                    changed.append('passive_submissions')
                if 'ip_status_timeout' in task['data']:
                    self.db.saveConf('ip_status_timeout', task['data']['ip_status_timeout'])
                    changed.append('ip_status_timeout')
                if 'ip_submit_timeout' in task['data']:
                    self.db.saveConf('ip_submit_timeout', task['data']['ip_submit_timeout'])
                    changed.append('ip_submit_timeout')
                self.limits.setLimits(self.db.getConf()[3], self.db.getConf()[4])
            except Exception as e:
                return {'success': False, 'error': str(e)}
            if not changed:
                return {'success': False, 'error': 'Nothing has been changed. No valid argument provided.'}
            return {'success': True, 'message': 'Changed '+', '.join(changed)}

        return {'success': False, 'error': 'Unknown request'}

    def ping_workers(self):
        for worker in self.workers[:]:
            res = worker.communicate([json.dumps({
                'request': 'ping',
            })])
            # communicate() returns a truthy error value when the ping fails
            if res:
                self.remove_worker(worker)

    def remove_worker(self, worker):
        for wrk in self.workers[:]:
            if wrk == worker:
                log.write("Worker {} removed".format(worker.addr))
                worker.conn.close()
                self.workers.remove(worker)

    def workers_connected(self):
        ''' Return the number of connected workers '''
        return len(self.workers)
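The Limits(...) calls in this class imply a small per-IP throttle built from the two timeout settings; a minimal sketch under that assumption (the constructor shape and setLimits come from the snippet, the allow method and field names are invented):

import time

class Limits(object):
    """Hypothetical per-IP throttle for status/submit requests."""

    def __init__(self, status_timeout, submit_timeout):
        self.setLimits(status_timeout, submit_timeout)
        self._last = {}  # (ip, kind) -> timestamp of the last request

    def setLimits(self, status_timeout, submit_timeout):
        # Seconds an IP must wait between requests of each kind
        self.timeouts = {'status': status_timeout, 'submit': submit_timeout}

    def allow(self, ip, kind):
        """Return True if `ip` may issue a `kind` request now."""
        now = time.time()
        if now - self._last.get((ip, kind), 0) < self.timeouts[kind]:
            return False
        self._last[(ip, kind)] = now
        return True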
Example #12
def four_lepton(name, channels, directory, scale=1.0, fs=None, tau=False):
    for mass in _4L_MASSES:

        if tau:
            cuts = '(%f < h1mass) & (h1mass < %f)' % (0.5*mass, 1.1*mass)
            cuts += '& (%f < h2mass) & (h2mass < %f)' % (0.5*mass, 1.1*mass)
            cuts += '& (z_sep > 10)'
            cuts += '& ((%f < sT) | (400 < sT))' % (mass + 100.0)
        else:
            cuts = '(%f < h1mass) & (h1mass < %f)' % (0.9*mass, 1.1*mass)
            cuts += '& (%f < h2mass) & (h2mass < %f)' % (0.9*mass, 1.1*mass)
            #cuts += '& (z_sep > 20)'
            cuts += '& (%f < sT)' % (0.6*mass + 130.0)

        cuts += '& (%s)' % ' | '.join(['(channel == "%s")' % channel \
                for channel in channels])

        limits = Limits("DblH", cuts, "./ntuples", "%s/%i" % (directory, mass),
                channels=["dblh4l"], lumi=19.7, blinded=True)

        limits.add_group("hpp%i" % mass, "HPlus*%i*" % mass, isSignal=True,
                scale=scale, allowed_decays=fs)
        limits.add_group("data", "data_*", isData=True)

        lumi = {'hpp%i' % mass: 1.026}
        limits.add_systematics("lumi", "lnN", **lumi)

        hpp_sys = {'hpp%i' % mass: 1.15}
        limits.add_systematics("sig_mc_err", "lnN", **hpp_sys)

        eff_syst = efficiency_systematic(name)

        # Add the muon efficiency systematic if it exists for this channel
        if eff_syst[0]:
            mu_eff = {'hpp%i' % mass: eff_syst[0]}
            limits.add_systematics("mu_eff", "lnN", **mu_eff)

        # Add the electron efficiency systematic if it exists for this channel
        if eff_syst[1]:
            e_eff = {'hpp%i' % mass: eff_syst[1]}
            limits.add_systematics("e_eff", "lnN", **e_eff)

        if tau:
            N_db_data = mky.data_sideband(
                mass,
                '(%s)' % ' | '.join(['(channel == "%s")' % channel for channel in channels]),
                cuts='(10 < z_sep) & ((%f < sT) | (400 < sT))' % (mass + 100.0),
                tau=True)

            alpha = mky.alpha(
                mass,
                '(%s)' % ' | '.join(['(channel == "%s")' % channel for channel in channels]),
                tau=True)
        else:
            N_db_data = mky.data_sideband(
                mass,
                '(%s)' % ' | '.join(['(channel == "%s")' % channel for channel in channels]),
                #cuts='(z_sep > 80) & (%f < sT)' % (0.6*mass + 130.0))
                #cuts='(z_sep > 20) & (mass > 0)')
                cuts='(%f < sT)' % (0.6*mass + 130.0))

            alpha = mky.alpha(
                mass,
                '(%s)' % ' | '.join(['(channel == "%s")' % channel for channel in channels]))

        limits.add_bkg_rate("bkg_sb_%s" % channels[0], float(N_db_data) * alpha)
        kwargs = {"bkg_sb_%s" % channels[0]: alpha}
        limits.add_systematics("bkg_err_%s" % channels[0], "gmN %i" % N_db_data, **kwargs)

        kwargs = {"bkg_sb_%s" % channels[0]: 1.10}
        limits.add_systematics("alph_err_%s" % channels[0], "lnN", **kwargs)

        limits.gen_card("%s.txt" % name)
Example #13
def fourl(mass):
    logger.info("Processing mass-point %i" % mass)

    cuts = '(%f < h1mass) & (h1mass < %f)' % (0.9*mass, 1.1*mass)
    cuts += '& (%f < sT)' % (0.6*mass + 130.0)
    cuts += ('& ((channel == "mmmm") | (channel == "eeee") | (channel == "eemm") |'
             '(channel == "mmee") | (channel == "meme") | (channel == "emem") |'
             '(channel == "emme") | (channel == "meem") |'
             '(channel == "eeem") | (channel == "eeme") | (channel == "emee") | (channel == "meee") |'
             '(channel == "emmm") | (channel == "memm") | (channel == "mmem") | (channel == "mmme"))')

    limits = Limits("DblH", cuts, "./ntuples", "./datacards/light_lep_all/%i" % mass,
            channels=["dblh4l"], lumi=19.7, blinded=True)

    limits.add_group("hpp%i" % mass, "HPlus*%i*" % mass, isSignal=True)
    limits.add_group("dyjets", "DYJets*")
    limits.add_group("zz", "ZZTo*")
    limits.add_group("top", "T*")
    limits.add_group("data", "data_*", isData=True)

    lumi = {'hpp%i' % mass: 1.026,
            'dyjets':       1.026,
            'zz':           1.026,
            'top':          1.026}
    limits.add_systematics("lumi", "lnN", **lumi)

    mu_eff = {'hpp%i' % mass: 1.043,
              'dyjets':       1.043,
              'zz':           1.043,
              'top':          1.043}

    e_eff = {'hpp%i' % mass: 1.101,
             'dyjets':       1.101,
             'zz':           1.101,
             'top':          1.101}

    limits.add_systematics("mu_eff", "lnN", **mu_eff)
    limits.add_systematics("e_eff", "lnN", **e_eff)

    hpp_sys = {'hpp%i' % mass: 1.15}

    limits.add_systematics("mc_err", "lnN", **hpp_sys)

    limits.gen_card("4l.txt")
Example #14
def mmmm_100(mass):
    logger.info("Processing mass-point %i" % mass)

    cuts = '(%f < h1mass) & (h1mass < %f)' % (0.9*mass, 1.1*mass)
    cuts += '& (%f < sT)' % (0.6*mass + 130.0)
    cuts += '& (channel == "mmmm")'

    limits = Limits("DblH", cuts, "./ntuples", "./datacards/mmmm100/%i" % mass,
            channels=["dblh4l"], lumi=19.7, blinded=True)

    limits.add_group("hpp%i" % mass, "HPlus*%i*" % mass, isSignal=True, scale=36.0)
    limits.add_group("dyjets", "DYJets*")
    limits.add_group("zz", "ZZTo*")
    limits.add_group("top", "T*")
    limits.add_group("data", "data_*", isData=True)

    lumi = {'hpp%i' % mass: 1.026,
            'dyjets':       1.026,
            'zz':           1.026,
            'top':          1.026}
    limits.add_systematics("lumi", "lnN", **lumi)

    mu_eff = {'hpp%i' % mass: 1.043,
              'dyjets':       1.043,
              'zz':           1.043,
              'top':          1.043}
    limits.add_systematics("mu_eff", "lnN", **mu_eff)

    limits.gen_card("test.txt")
Example #15
def four_lepton_mc(name, channels, directory, scale=1.0):
    for mass in _4L_MASSES:
        cuts = '(%f < h1mass) & (h1mass < %f)' % (0.9*mass, 1.1*mass)
        cuts += '& (%f < h2mass) & (h2mass < %f)' % (0.9*mass, 1.1*mass)
        cuts += '& (%f < sT)' % (0.6*mass + 130.0)
        cuts += '& (%s)' % ' | '.join(['(channel == "%s")' % channel for channel in channels])

        limits = Limits("DblH", cuts, "./ntuples", "%s/%i" % (directory, mass),
                channels=["dblh4l"], lumi=19.7, blinded=True)

        limits.add_group("hpp%i" % mass, "HPlus*%i*" % mass, isSignal=True, scale=scale)
        limits.add_group("dyjets", "DYJets*")
        limits.add_group("zz", "ZZTo*")
        limits.add_group("top", "T*")
        limits.add_group("data", "data_*", isData=True)

        lumi = {'hpp%i' % mass: 1.026,
                'dyjets':       1.026,
                'zz':           1.026,
                'top':          1.026}
        limits.add_systematics("lumi", "lnN", **lumi)

        hpp_sys = {'hpp%i' % mass: 1.15}
        limits.add_systematics("sig_mc_err", "lnN", **hpp_sys)

        eff_syst = efficiency_systematic(name)

        if eff_syst[0]:
            mu_eff = {'hpp%i' % mass: eff_syst[0],
                      'dyjets':       eff_syst[0],
                      'zz':           eff_syst[0],
                      'top':          eff_syst[0]}
            limits.add_systematics("mu_eff", "lnN", **mu_eff)

        if eff_syst[1]:
            e_eff = {'hpp%i' % mass: eff_syst[1],
                     'dyjets':       eff_syst[1],
                     'zz':           eff_syst[1],
                     'top':          eff_syst[1]}
            limits.add_systematics("e_eff", "lnN", **e_eff)

        limits.gen_card("%s.txt" % name)
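Across Examples #12-#15 the Limits object exposes one consistent datacard-building interface. A stub capturing just the method shapes these snippets rely on (signatures are inferred from the call sites; bodies are placeholders, not the real implementation):

class Limits(object):
    """Stub of the datacard builder used in Examples #12-#15."""

    def __init__(self, analysis, cuts, ntuple_dir, out_dir,
                 channels=None, lumi=0.0, blinded=True):
        """Configure one mass point of the analysis."""

    def add_group(self, name, pattern, isSignal=False, isData=False,
                  scale=1.0, allowed_decays=None):
        """Register a sample group by file-name pattern."""

    def add_bkg_rate(self, name, rate):
        """Add a data-driven background with a fixed expected rate."""

    def add_systematics(self, name, dist, **group_values):
        """Attach a systematic (e.g. 'lnN', 'gmN <N>') to named groups."""

    def gen_card(self, filename):
        """Write the datacard to `filename`."""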