Example #1
File: consul.py  Project: luhn/pubsubclub
 def requeue(self, _=None):
     """Re-run the service query, throttled to one per MIN_QUERY_PERIOD."""
     run = lambda: self._query_services(wait=POLL_WAIT, debounce=True)
     if unix_timestamp() - self.last_queued < MIN_QUERY_PERIOD:
         # Queried too recently; defer the next poll.
         d = deferLater(reactor, MIN_QUERY_PERIOD, run)
     else:
         d = run()
     self.last_queued = unix_timestamp()
     d.addCallback(self.requeue)
     d.addErrback(self._handle_api_error)
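
Every example on this page calls a unix_timestamp() helper that is not shown; a minimal sketch, assuming it is just a thin wrapper over the standard library:

import time

def unix_timestamp():
    # Seconds since the Unix epoch, as a float; callers cast to int
    # where a whole-second value is needed.
    return time.time()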
Example #2
def generate_schedule_commands(schedule):
    # FUTURE
    # We could support more than 10 items by using more than one profile sequence:
    # just add more commands to the returned list with different profile sequences/ids (which one?)
    profile_id = 1
    profile_sequence = 0
    dali_address = 254  # broadcast
    days = 254  # all days, minus holidays

    steps = []
    for time, _cmd_id, cmd_name in schedule:
        try:
            if cmd_name.startswith('set_dim_level_'):
                level = int(cmd_name[len('set_dim_level_'):])
                steps.append((time, level))
        except ValueError:
            # Dim level suffix was not an integer; skip this command.
            pass

    set_schedule_cmd = {
        'data': encode_profile_configuration_message(profile_id, profile_sequence, dali_address, days, steps).decode(),
        'fport': 50
    }
    set_clock_cmd = {
        'data': encode_time_settings_configuration_message(int(unix_timestamp()) + 3*60*60).decode(),
        'fport': 50
    }

    return [
        set_schedule_cmd,
        set_clock_cmd
    ]
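
A hypothetical call, assuming schedule entries are (time, command id, command name) tuples as the loop above expects; only 'set_dim_level_<n>' names contribute steps:

schedule = [
    ('06:00', 1, 'set_dim_level_80'),  # hypothetical time format
    ('22:00', 2, 'set_dim_level_10'),
    ('23:00', 3, 'turn_off'),          # skipped: carries no dim level
]
commands = generate_schedule_commands(schedule)
# -> [set_schedule_cmd, set_clock_cmd], both sent on fport 50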
Example #3
def convert_to_saccade_node(trial_node):
    n = trial_node
    return {
        'name': n['name'] + 'saccade/',
        'timestamp': int(unix_timestamp()),
        'format': 'gazelle/v1/saccade/',
        'meta': {},
        'function': ['saccade_em'],
        'function_version': [],  # virgin
        'input': [n['name']],
        'input_timestamp': [],  # virgin
        'output': [],  # virgin
    }
Example #4
def convert_to_project_node(session_nodes):
    '''
    Combine sessions to get an aggregate of all trial sequences and trials.
    '''
    if len(session_nodes) < 1:
        raise ValueError('Should receive at least one session node.')

    # More general name: remove method and participant
    ses_name = session_nodes[0]['name']
    node_name = '/'.join(ses_name.split('/')[:-8]) + '/'

    return {
        'name': node_name,
        'timestamp': int(unix_timestamp()),
        'format': 'gazelle/v1/project/simple/',
        'meta': {},
        'function': ['trial_project'],
        'function_version': [],  # virgin
        'input': [n['name'] for n in session_nodes],
        'input_timestamp': [],  # virgin
        'output': [],  # virgin
    }
Example #5
def convert_to_session_node(sequence_nodes):
    '''
    Combine sequence nodes into a single session node.
    '''
    if len(sequence_nodes) < 1:
        raise ValueError('Should receive at least one sequence node.')

    # More general name: remove SRT id
    seq_name = sequence_nodes[0]['name']
    node_name = '/'.join(seq_name.split('/')[:-2]) + '/'

    return {
        'name': node_name,
        'timestamp': int(unix_timestamp()),
        'format': 'gazelle/v1/session/simple/',
        'meta': {},
        'function': ['trial_session'],
        'function_version': [],  # virgin
        'input': [n['name'] for n in sequence_nodes],
        'input_timestamp': [],  # virgin
        'output': [],  # virgin
    }
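
Note that because node names end with '/', split('/') yields a trailing empty string, so the [:-2] slice above drops one real path component (the SRT id), not two. A small illustration with a hypothetical name:

seq_name = 'icl/cg/person/p01/srt1/'  # hypothetical
parts = seq_name.split('/')           # ['icl', 'cg', 'person', 'p01', 'srt1', '']
'/'.join(parts[:-2]) + '/'            # -> 'icl/cg/person/p01/'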
Example #6
def by_prototype(baby_node):
    '''
    Generates valid node from minimal node dictionary.
    Required keys are 'name' and 'format'.
    '''
    b = baby_node  # alias
    adult_node = {}

    required_keys_and_types = {
        'name': str,
        'format': str
    }

    keys_and_defaults = {
        'function': [],
        'function_version': [],
        'input': [],
        'input_timestamp': [],
        'output': None,
        'meta': {},
        'timestamp': int(unix_timestamp()),
    }

    for k, t in required_keys_and_types.items():
        if k not in b:
            raise InvalidNodeCandidate('Node does not have required key: ' + k)
        if not isinstance(b[k], t):
            raise InvalidNodeCandidate('Node key ' + k + ' has invalid type.')
        adult_node[k] = b[k]

    for k, default in keys_and_defaults.items():
        if k in baby_node:
            adult_node[k] = baby_node[k]
        else:
            adult_node[k] = default

    return adult_node
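
A hypothetical call showing the contract: only 'name' and 'format' are required, and everything else receives a default:

baby = {'name': 'icl/cg/demo/', 'format': 'gazelle/v1/trial/simple/'}
node = by_prototype(baby)
# node['function'] == [], node['output'] is None, node['meta'] == {},
# and node['timestamp'] is the current unix time as an int.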
Example #7
def compute_node(db, node, policy='init'):
    '''
    Ensure that the node is up to date and, if not, execute its function.
    Return the ensured node.

    Computation policies:
      policy='init' (default)
        compute only if not computed before.
      policy='ensure'
        ensure that cache is up-to-date and execute only if necessary.
      policy='force'
        compute always, disregard any caches.
    '''
    if policy not in ['init', 'ensure', 'force']:
        raise ValueError('Unknown policy: ' + policy)

    # Node might not have been found; check before touching node['name']
    if node is None:
        raise InvalidNodeError('Invalid node: None')

    print 'Computing node: ' + node['name']

    # Ensure that the object really has sufficient properties
    if any(k not in node for k in ['name', 'function', 'output']):
        raise InvalidNodeError('Node does not have required attributes.')

    # Aliases to simplify names.
    input_names = node['input']
    input_times = node['input_timestamp']
    fn_versions = node['function_version']
    output      = node['output']
    fn_names    = node['function']

    # Ensure that the object really has sufficient properties
    if not isinstance(fn_versions, list):
        raise InvalidNodeError('Node\'s function versions should be a list.')
    if not isinstance(fn_names, list):
        raise InvalidNodeError('Node\'s function should be a list.')

    # Node without function is a data node, and does not need execution
    if len(fn_names) < 1:
        # A data node, no functions to execute
        return node

    # If node has been run before, return it without recomputation
    if policy == 'init' and len(fn_versions) == len(fn_names):
        return node

    # Gather the data for execution. Load input nodes:
    input_nodes = [get_node.by_name_computed(db, name, policy)
                   for name in input_names]

    # Import functions.
    fn_modules = []
    for fn_name in fn_names:

        # Import the function
        try:
            modu = import_module('functions.' + fn_name)
        except ImportError as e:
            raise InvalidNodeFunctionError('No node function ' + fn_name +
                ' can be found or there is a problem in the module: ' + str(e))

        # Assert: import successful
        if not hasattr(modu, 'execute') or not hasattr(modu, 'version'):
            raise InvalidNodeFunctionAPIError('Node function should have an ' +
                'execute method and a version property.')

        fn_modules.append(modu)

    # If the function versions have remained the same
    # and cached output has same timestamp as the input nodes,
    # then do not recompute. However, under 'force' policy, compute
    # anyway. Under 'init' policy, if we got this far, nothing is up-to-date.
    if policy != 'force':
        inputs_up_to_date = (
            len(input_times) == len(input_nodes) and  # virgin nodes lack times
            all(n['timestamp'] == t
                for n, t in zip(input_nodes, input_times)))
        versions_up_to_date = (
            len(fn_versions) == len(fn_names) and
            all(m.version == v
                for m, v in zip(fn_modules, fn_versions)))
        if inputs_up_to_date and versions_up_to_date:
            # Cached output is current; no reason to recompute.
            return node

    # Execute functions from left to right, feeding the prev output to
    # next input. Give input nodes as parameters to the first.
    input_args = input_nodes
    for modu in fn_modules:
        # Execute the function TODO function composition
        input_args = modu.execute(input_args)
    # The last input_args is the final output.

    # Store results and update timestamps. Like a cache.
    node['output'] = input_args
    node['timestamp'] = int(unix_timestamp())
    node['input_timestamp'] = [n['timestamp'] for n in input_nodes]
    node['function_version'] = [m.version for m in fn_modules]

    db.nodes.replace_one({'_id': node['_id']}, node)

    return node
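
compute_node imports each listed function as functions.<fn_name> and requires the module to expose an execute callable and a version attribute. A minimal sketch of such a module, under those assumptions (the file body is hypothetical):

# functions/saccade_em.py (hypothetical)
version = '1.0.0'

def execute(input_args):
    # The first function in the chain receives the input nodes; each later
    # function receives the previous function's return value. Whatever the
    # last function returns is stored as node['output'].
    return [n['output'] for n in input_args]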
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'asg_base_name',
        help=(
            'The base name of the ASG. Turquoise will look for an ASG with ' +
            'a name equal to the base name, or the base name plus a hyphen ' +
            'plus a unix timestamp (e.g., "my-asg-1234567").'
        ),
    )
    parser.add_argument(
        'ami_id',
        help='The ID of the AMI you would like to deploy.',
    )
    args = parser.parse_args()

    asg_client = boto3.client('autoscaling')
    elb_client = boto3.client('elb')
    asg_new_name = args.asg_base_name + '-' + str(int(unix_timestamp()))

    print('Searching for ASG...')
    asg = find_asg(asg_client, args.asg_base_name)
    if asg is None:
        raise Exception('Could not find ASG.')

    print('Loading LC...')
    lc = asg_client.describe_launch_configurations(
        LaunchConfigurationNames=[asg['LaunchConfigurationName']],
    )['LaunchConfigurations'][0]

    print('Suspending ASG...')
    asg_client.suspend_processes(
        AutoScalingGroupName=asg['AutoScalingGroupName'],
    )

    print('Creating new launch configuration...')
    new_lc = clone_lc(asg_client, lc, asg_new_name, args.ami_id)
    print('Creating new ASG...')
    new_asg = clone_asg(
        asg_client,
        asg,
        asg_new_name,
        new_lc['LaunchConfigurationName'],
    )

    has_lbs = len(asg['LoadBalancerNames']) > 0
    if has_lbs:
        asg_client.attach_load_balancers(
            AutoScalingGroupName=asg_new_name,
            LoadBalancerNames=asg['LoadBalancerNames'],
        )

    print('Waiting for instances to boot...')
    wait_for_instances(
        asg_client, new_asg, 'InService', 'Healthy',
        new_asg['DesiredCapacity'],
    )

    if has_lbs:
        # Wait for LB health checks
        print('Waiting for ELB health checks...')
        for lb_name in asg['LoadBalancerNames']:
            wait_for_lb_instances(elb_client, lb_name)

    print('Scaling down and deleting old ASG...')
    delete_asg(asg_client, asg)
    print('Deleting old LC...')
    asg_client.delete_launch_configuration(
        LaunchConfigurationName=asg['LaunchConfigurationName'],
    )
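
The script is driven by the two positional arguments defined above, e.g. (script name and AMI ID are hypothetical):

python turquoise.py my-asg ami-0123456789abcdef0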
Example #9
File: parse.py  Project: Kagee/mineparse
def put(action, timestamp, nick, message):
    if len(storage) > qsize:
        storage.pop(0)
    storage.append({'action': action, 'timestamp': timestamp, 'nick': nick,
                    'message': message, 'unix_timestamp': unix_timestamp()})
    writeJSON()
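
A hypothetical call; storage, qsize, and writeJSON are module globals defined elsewhere in parse.py, and the timestamp format is whatever the log parser produces:

put('say', '2014-07-01 12:00:00', 'steve', 'hello world')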
Example #10
def convert_to_trial_nodes(gazedata_path, config_path):
    '''
    Return
      list of nodes
    '''
    # Open file
    config = loadjson(config_path)

    g = loadCsvAsJson(gazedata_path, dialect='excel-tab')
    # Collect meta from filename
    meta = rec.recognize_from_filename(gazedata_path)
    # Pick config id from filemeta
    config_id = meta['trial_configuration_id']
    # Pick meta from config. Find correct config by config id.
    trialseq_config = [x for x in config if x['name'] == config_id][0]
    aoi_rects = trialseq_config['aois']

    # Create entries (and collections) for each + entry for source file

    # For each trial, create a trial node
    trials = gazedata.getTrials(g)
    nodes = []
    for trial in trials:

        # Collect trial meta
        relevant_g = gazedata.getSaccadeWindow(trial)
        rel_start, rel_end = gazedata.getSaccadeWindowRange(trial)
        aoi_xy = gazedata.getTargetLocation(relevant_g, aoi_rects)
        num_valid_p = gazedata.countValidGazepoints(trial)
        num_valid_sac_p = gazedata.countValidGazepoints(relevant_g)
        if len(trial) > 0:
            validity_ratio = float(num_valid_p) / len(trial)
        else:
            validity_ratio = 0

        trial_num = gazedata.getTrialNumber(trial)
        trial_num_str = str(trial_num).zfill(2)
        age_str = str(meta['participant_age_months']) + 'mo'

        # Normalize gazepoints
        norm_g = [tobii_to_gazelle(p) for p in trial]

        name_root = '/'.join([
            'icl',
            'cg',
            'person',
            meta['participant_id'],
            'age',
            age_str,
            'method',
            meta['method_version'],
            'calib' if meta['calibration_successful'] else 'nocalib',
            meta['trial_configuration_id'].lower(),
            'trial',
            trial_num_str,
        ])

        trial_node = {
            'name': name_root + '/',
            'timestamp': int(unix_timestamp()),
            'format': 'gazelle/v1/trial/simple/',
            'meta': {
                'date': gazedata.get_trial_date(trial),
                'participant_id': meta['participant_id'],
                'method_version': meta['method_version'],
                'participant_age_months': meta['participant_age_months'],
                'calibration_successful': meta['calibration_successful'],
                'trial_configuration_id': meta['trial_configuration_id'],
                'trial_number': trial_num,
                'num_points': len(norm_g),
                'num_valid_points': num_valid_p,
                'validity_ratio': validity_ratio,
                'aoi_x_rel': aoi_xy[0],
                'aoi_y_rel': aoi_xy[1],
                'source_file': os.path.basename(gazedata_path),
                'tags': {
                    'target': {'range': [rel_start, -1]},
                    'saccade_window': {'range': [rel_start, rel_end]},
                },
            },
            'function': [],
            'function_version': [],
            'input': [],
            'input_timestamp': [],
            'output': norm_g,
        }

        nodes.append(trial_node)
    return nodes
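
With hypothetical meta values (participant 'p01', 8 months old, method 'v1', successful calibration, configuration 'SRT1', trial 3), the name_root join above produces:

# 'icl/cg/person/p01/age/8mo/method/v1/calib/srt1/trial/03'
# so the node name becomes 'icl/cg/person/p01/age/8mo/method/v1/calib/srt1/trial/03/'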