Example No. 1
def post(request):
    """
    Create new test plan.
    """
    try:
        new = json.loads(request.body)
        assert "name" in new
    except ValueError:
        return HttpResponseBadRequest("invalid JSON")
    except AssertionError:
        return HttpResponseBadRequest("argument mismatch")

    if 'rules' in new:
        new['rules'] = [rule_model.validate(rule) for rule in new['rules']]
        if None in new['rules']:  # Invalid rules are re-assigned to None
            return HttpResponse("invalid rule(s) provided")

    dbc = db_model.connect()
    testplan = dbc.testplan.find_one({"name": new['name']})
    if testplan is not None:
        return HttpResponseBadRequest("testplan named '%s' already exists" % new['name'])

    new['createdAt'] = datetime.isoformat(datetime.now())
    new['updatedAt'] = datetime.isoformat(datetime.now())
    testplan_id = str(dbc.testplan.save(new))
    r = JsonResponse({"id": testplan_id}, status=200)
    r['location'] = "/api/testplan/%s" % testplan_id
    logger.info("test plan '%s' created by '%s'" % (testplan_id, request.user['username']))
    return r
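
A note on the pattern shared by all of these examples: datetime.isoformat(x) is simply the unbound form of x.isoformat(); both produce the same ISO 8601 string. A minimal sketch:

from datetime import datetime

now = datetime.now()
assert datetime.isoformat(now) == now.isoformat()  # e.g. '2024-01-15T09:30:00.123456'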
Example No. 2
def verify_db_table(conn, table):
    try:
        verify_db(conn)
        result = r.db('wanwipe').table_create(table).run(conn)
        print("{}: BaseDB: {} table created: {}".format(dt.isoformat(dt.now()), table, result), file=sys.stderr)
    except RqlRuntimeError:
        print("{}: BaseDB: {} table found.".format(dt.isoformat(dt.now()), table), file=sys.stderr)
def look_relevant_earthquake(name, times, distance=400, h=10, min_mag=2.5):
    '''
    Search for earthquakes within a time interval, with a few restrictions, for a given station

    :param name: (string) station name
    :param times: list of datetime intervals, formatted as tuples (initial_time, end_time)
    :param distance: (int) maximum distance from the station
    :param h: (int) number of hours after the end of the interval to look for earthquakes
    :param min_mag: (float) minimum magnitude to look for earthquakes

    :return: (bool list) for each interval, whether there is at least one relevant earthquake
            satisfying the restrictions
    '''
    ans = []
    stationcoord = station.get(name)

    for begin, end in times:
        earthquake = load_earthquakes(datetime.isoformat(begin),
                                      datetime.isoformat(end + dt.timedelta(hours=h)),
                                      stationcoord,
                                      max_distance=distance,
                                      min_magnitude=min_mag)

        if len(earthquake.index) >= 1:
            ans.append(True)
        else:
            ans.append(False)

    return ans
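
A hypothetical call, assuming the station lookup and the load_earthquakes helper from the snippet above are in scope (the station name and dates below are made up for illustration):

from datetime import datetime

intervals = [(datetime(2020, 1, 1), datetime(2020, 1, 2)),
             (datetime(2020, 2, 1), datetime(2020, 2, 3))]
flags = look_relevant_earthquake('STA01', intervals, distance=300, h=6, min_mag=3.0)
# flags holds one boolean per interval, e.g. [True, False]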
Example No. 4
def MonitorCheckCpu(request):
    project = request.GET.get('proname')
    mtype = request.GET.get('mtype')
    pdate = request.GET.get('pdate')
    ldate = request.GET.get('ldate')
    
    if pdate is None:
        pdate = datetime.isoformat(datetime.now())
    if ldate is None:
        ldate = datetime.isoformat(datetime.now() - timedelta(hours=1))

    mcursor = MongodbCollection().mcollection.find({"project": project, "mtype": mtype, "mdatetime": {'$gte': ldate, '$lt': pdate}})
    
  

    result = []
    for item in mcursor:
        print(item)
        #result = [[item['matrix'][18:],mdate(dateutil.parser.parse(item['mdatetime']))]for item in mcursor]
        tmp_rs = [float(value) for value in map(lambda x:x.split('=')[1], item['matrix'][18:].strip().split(' '))]
        tmp_rs.append(mdate(dateutil.parser.parse(item['mdatetime'])))
        result.append(tmp_rs)
        
    print(result)
    
    return render(request,'devapps/monitorcheckcpu.html',{"result":result})
Example No. 5
def loopHelper(number, trialNumber):

    global OUTPUT_FILE_NAME

    file_prefix = "./" + str(trialNumber) + "-"
    if os.path.exists(file_prefix+str(number)+".com"):
        with open(OUTPUT_FILE_NAME, 'a') as f:
            s = 'Started movement type '+str(number)+' at '+ datetime.isoformat(datetime.now()) + '\n'
            f.write(s)
        with open('start.txt', 'w') as f:
            s = 'Started movement type '+str(number)+' at '+ datetime.isoformat(datetime.now()) + '\n'
            f.write(s)
        if os.path.exists('./end.txt'):
            os.remove('./end.txt')


        supplyBoxes(number)
        with open(OUTPUT_FILE_NAME, 'a') as f:
            s = 'Finished movement type '+str(number)+' at ' + datetime.isoformat(datetime.now())+ '\n'
            f.write(s)
        with open('end.txt', 'w') as f:
            s = 'Finished movement type '+str(number)+' at '+ datetime.isoformat(datetime.now()) + '\n'
            f.write(s)

        if os.path.exists('./start.txt'):
            os.remove('./start.txt')

        os.remove(file_prefix + str(number)+".com")
        return True
    else:
        return False
Example No. 6
def write_since_last_indexed():
    """
    Write query for all English documents indexed since last date stored,
    splitting into multiple event files to facilitate multiprocessing.
    """
    # Read the date this script was last run
    with open(LAST_INDEXED, 'r') as f:
        last_indexed = datetime.strptime(f.read().strip(),
                                         '%Y-%m-%dT%H:%M:%S.%f')

    # Set the current date
    now = datetime.utcnow()

    # Split the time delta into equal parts
    delta = total_seconds(now - last_indexed)
    increment = delta / SPLIT
    delimiters = [(last_indexed + timedelta(seconds=increment*i)) for i in
                  range(SPLIT)] + [now]

    # Write the current date to file for future use
    with open(LAST_INDEXED, 'w') as f:
        f.write(datetime.isoformat(now))

    # Write multiple Solr queries to the events directory
    for i in range(SPLIT):
        from_ = datetime.isoformat(delimiters[i])
        to = datetime.isoformat(delimiters[i+1])
        query = 'iscontent:true AND lang:en AND indexed:[%sZ TO %sZ]' % (from_,
                                                                         to)
        with open(EVENT_DIR + '%d' % i, 'w') as f:
            f.write(query)
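
To make the splitting concrete: with SPLIT = 4 and a two-hour gap since the last run, the increment is 1800 seconds and the code produces five boundaries (four window starts plus now), i.e. four query windows. The same arithmetic, sketched with the standard total_seconds() method and an assumed SPLIT of 4:

from datetime import datetime, timedelta

SPLIT = 4
last_indexed = datetime(2024, 1, 1, 0, 0, 0)
now = datetime(2024, 1, 1, 2, 0, 0)
increment = (now - last_indexed).total_seconds() / SPLIT   # 1800.0
delimiters = [last_indexed + timedelta(seconds=increment * i) for i in range(SPLIT)] + [now]
# boundaries at 00:00, 00:30, 01:00, 01:30, 02:00 -> windows [00:00, 00:30] ... [01:30, 02:00]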
Example No. 7
def fetch_incidents():
    param_dict = {}
    now_time = datetime.utcnow()
    now = datetime.isoformat(now_time)
    lastRunObject = demisto.getLastRun()
    if lastRunObject:
        param_dict['since'] = lastRunObject['time']
    else:
        param_dict['since'] = datetime.isoformat(now_time - timedelta(minutes=int(FETCH_INTERVAL)))

    param_dict['until'] = now

    url = SERVER_URL + GET_INCIDENTS_SUFFIX + configure_status()
    res = http_request('GET', url, param_dict)
    _, parsed_incidents, raw_responses = parse_incident_data(res.get('incidents', []))

    incidents = []
    for incident, raw_response in zip(parsed_incidents, raw_responses):
        incidents.append({
            'name': incident['ID'] + ' - ' + incident['Title'],
            'occurred': incident['created_at'],
            'severity': translate_severity(incident['urgency']),
            'rawJSON': json.dumps(raw_response)
        })

    demisto.incidents(incidents)
    demisto.setLastRun({'time': now})
Example No. 8
def db_refresh(conn):
    """Refresh the timestamp on the database entry to act as a heartbeat.
    """
    print("{}: Refreshing Database.".format(dt.isoformat(dt.now())), file=sys.stderr)
    # noinspection PyUnusedLocal
    updated = r.db('wanwipe').table('machine_state').get(machine_state_uuid).update({
        'updated_at': r.now()}).run(conn)  # Update the record timestamp.
    print("{}: Refreshed Database successfully.".format(dt.isoformat(dt.now())), file=sys.stderr)
Example No. 9
def post(request):
    """
    Create a new session.
    """
    try:
        new = json.loads(request.body)
        assert "name" in new
        assert "description" in new
        assert "upstreamHost" in new
        assert "upstreamPort" in new
    except AssertionError:
        return HttpResponseBadRequest("argument mismatch")
    except ValueError:
        return HttpResponseBadRequest("invalid JSON")

    dbc = db_model.connect()

    session = {
        'name': new['name'],
        'description': new['description'],
        'username': request.user['username'],
        'upstreamHost': new['upstreamHost'],
        'upstreamPort': new['upstreamPort'],
        'createdAt': datetime.isoformat(datetime.now()),
        'updatedAt': datetime.isoformat(datetime.now()),
        'executions': 0,
    }

    # Add optional fields
    if ("testPlan" in new) and ("id" in new['testPlan']):
        testplan = dbc.testplan.find_one({"_id": ObjectId(new['testPlan']['id'])})
        if testplan is not None:
            session['testPlan'] = {"id": new['testPlan']}
        else:
            return HttpResponseNotFound("testplan '%s' does not exist" % new['testplan'])

    if ("serverOverloadProfile" in new) and ("id" in new['serverOverloadProfile']):
        so_profile = dbc.serveroverload.find_one({"_id": ObjectId(new['serverOverloadProfile']['id'])})
        if so_profile is not None:
            session['serverOverloadProfile'] = {"id": new['serverOverloadProfile']['id']}
        else:
            return HttpResponseNotFound("serverOverloadProfile '%s' does not exist" % new['serverOverloadProfile']['id'])

    if ("qosProfile" in new) and ("id" in new['qosProfile']):
        qos_profile = dbc.qos.find_one({"_id": ObjectId(new['qosProfile']['id'])})
        if qos_profile is not None:
            session['qosProfile'] = {"id": new['qosProfile']['id']}
        else:
            return HttpResponseNotFound("qosProfile'%s' does not exist" % new['qosProfile']['id'])

    try:
        session_id = str(dbc.session.save(session))
    except DuplicateKeyError:
        return HttpResponseBadRequest("session name is not unique")
    r = JsonResponse({"id": session_id})
    r['location'] = "/api/session/%s" % session_id
    logger.info("session '%s' created by '%s'" % (session_id, request.user['username']))
    return r
Example No. 10
def main():
    """
    Automation script for running scaling tests for Toil Recompute
    """
    parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')
    parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')
    parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample desired in TB.')
    parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. m4.large or c3.8xlarge.')
    parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')
    parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')
    parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')
    parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')
    parser.add_argument('-d', '--shared_dir', required=True,
                        help='Full path to directory with: pipeline script, launch script, config, and master key.')
    params = parser.parse_args()

    # Run sequence
    start = time.time()
    # Get number of samples from config
    with open(params.config, 'r') as f:
        num_samples = len(f.readlines())
    # Launch cluster and pipeline
    uuid = fix_launch(params)
    launch_cluster(params)
    ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')
    launch_pipeline(params)
    # Blocks until all workers are idle
    stop = time.time()
    # Collect metrics from cluster
    collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)
    # Apply "Insta-kill" alarm to every worker
    map(apply_alarm_to_instance, ids)
    # Kill leader
    logging.info('Killing Leader')
    leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]
    apply_alarm_to_instance(leader_id, threshold=5)
    # Generate Run Report
    avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]
    total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)
    # Report values
    output = ['UUID: {}'.format(uuid),
              'Number of Samples: {}'.format(num_samples),
              'Number of Nodes: {}'.format(params.cluster_size),
              'Cluster Name: {}'.format(params.cluster_name),
              'Source Bucket: {}'.format(params.bucket),
              'Average Hourly Cost: ${}'.format(avg_hourly_cost),
              'Cost per Instance: ${}'.format(total_cost),
              'Availability Zone: {}'.format(avail_zone),
              'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),
              'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),
              'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),
              'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]
    with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:
        f.write('\n'.join(output))
    # You're done!
    logging.info('\n\nScaling Test Complete.')
Example No. 11
    def __str__(self):

        start_str = "None"
        if self.start != None:
            start_str = datetime.isoformat(self.start)
        end_str = "None"
        if self.end != None:
            end_str = datetime.isoformat(self.end)
        return '\t'.join( [self.topic, start_str, end_str] )
Example No. 12
def test_get_orders(client):
    data = client.get_orders(data=dict(
        search_start_date=datetime.isoformat(
            datetime(2016, 1, 1)),
        search_end_date=datetime.isoformat(
            datetime(2016, 2, 1)),
    ))

    assert data.get('status') == 'success'
    assert data.get('orders')
Example No. 13
def connect_db(connection, hostname='localhost'):
    uplink = None
    if connection is None:
        try:
            uplink = r.connect(host=hostname)  # We don't select a specific database or table.
            print("{}: BaseDB: Connected to rethinkdb successfully.".format(dt.isoformat(dt.now())), file=sys.stderr)
        except RqlDriverError:
            print("{}: BaseDB: Failed to connect to rethinkdb. Check the daemon status and try again.".format(dt.isoformat(dt.now())), file=sys.stderr)
    else:
        uplink = connection
        print("{}: BaseDB: Reusing connection to rethinkdb.".format(dt.isoformat(dt.now())), file=sys.stderr)
    return uplink
Example No. 14
def Monitorlist(request):
    proname = request.GET.get('proname')
    client = MongoClient()
    db = client.mcollection
    mcursor = db.mcollection.find({'project':proname})
    mchecks = []
    for document in mcursor:
        mchecks.append(document['mtype'])
    mcheck = list(set(mchecks))
    presentdate = datetime.isoformat(datetime.now())
    lasthourdate = datetime.isoformat(datetime.now() - timedelta(hours=1))
    mdategraph = {'lasthourdate': lasthourdate, "presentdate": presentdate}
    return render(request, 'devapps/monitorlistparm.html', {'mcheck': mcheck, 'proname': proname, "mdategraph": mdategraph})
Example No. 15
def first_day_datetime(first_day_date, time_of_course):
    """Get the date and time of the first course of the semester
    for a specific course.
    Returns a tuple of 2 isoformat datetime strings."""
    hours = findall(r"[\dd']+", time_of_course)

    start_t = time(int(hours[0]), int(hours[1]))
    end_t = time(int(hours[2]), int(hours[3]))

    # Get start_time and end_time by concatenating the previous result.
    start_datetime = datetime.isoformat(datetime.combine(first_day_date, start_t))
    end_datetime = datetime.isoformat(datetime.combine(first_day_date, end_t))
    return (start_datetime, end_datetime)
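
For illustration, assuming a schedule string like '08h15 10h00' (the regex only captures the digit groups):

from datetime import date, datetime, time
from re import findall

hours = findall(r"[\dd']+", '08h15 10h00')   # ['08', '15', '10', '00']
start = datetime.isoformat(datetime.combine(date(2024, 9, 2), time(8, 15)))
end = datetime.isoformat(datetime.combine(date(2024, 9, 2), time(10, 0)))
# ('2024-09-02T08:15:00', '2024-09-02T10:00:00')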
Example No. 16
    def power_outage_scan(self):
        vin = self.get_voltage_in()
        if vin is None:
            return

        if vin < self.min_voltage_in:
            self._power_out = True
            self.send_email_notification('{} Power Outage. Vin= {}'.format(datetime.now().isoformat(), vin))

        elif self._power_out:
            self._power_out = False
            self.send_email_notification('{} Power Returned. Vin= {}'.format(datetime.now().isoformat(), vin))

        return vin
Example No. 17
def db_lookup_disk(conn, device):
    """Looks up a disk from the disks database.
    :param device: The device to search
    """
    print("{}: LookupDisk: disks query for: {}".format(dt.isoformat(dt.now()), device), file=sys.stderr)
    try:
        result = r.db('wanwipe').table('disks').get_all(get_disk_serial(device), index='serial_no').run(conn)
        for document in result:  # Look over the returned documents. There should only be one, serial_no is unique.
            print("{}: LookupDisk: disks query found a matching document: {}".format(dt.isoformat(dt.now()), document), file=sys.stderr)
            return document.get('id')  # Always return the current disk, skipping the below.
        print("{}: LookupDisk: couldn't find that disk. Creating new disk.".format(dt.isoformat(dt.now())), file=sys.stderr)  # We didn't return above, so...
        return db_register_disk(conn, device)  # Register the disk with the disks database.
    except RqlRuntimeError as kaboom:
        print("{}: LookupDisk: disks lookup failed somehow: {}".format(dt.isoformat(dt.now()), kaboom), file=sys.stderr)
Example No. 18
def timer_fired():
    """Do periodic housekeeping tasks. I'm a transient thread!
    """
    conn = connect_db(None)  # Assure this thread is connected to rethinkdb.
    try:
        r.now().run(conn, time_format="raw")  # Ping the database first.
    except RqlDriverError:
        print("{}: Database connection problem. Reconnecting.".format(dt.isoformat(dt.now())), file=sys.stderr)
        conn = connect_db(None)  # Make very sure we're connected to rethinkdb.
    db_refresh(conn)  # Refresh the timestamp on the machine_state
    conn.close()
    print("{}: Waiting for device changes (press ctrl+c to exit)".format(dt.isoformat(dt.now())))

    return True  # To fire the timer again.
Example No. 19
    def get_report_page(self, page=1):
        headers = {'content-type': 'application/json'}
        params = {'workspace_id': 288359,
                  'since': datetime.isoformat(self.since),
                  'until': datetime.isoformat(self.until),
                  'user_agent': 'merxbj_api'}

        if page > 1:
            params['page'] = page

        response = requests.get('https://toggl.com/reports/api/v2/details',
                                auth=('84a975312e9cf2f028037f5f292133ce', 'api_token'),
                                headers=headers,
                                params=params)
        return response.json()
Example No. 20
 def _format_time(timestamp):
     t = datetime.isoformat(datetime.utcfromtimestamp(timestamp))
     # if the time has no fractional value the iso timestamp won't contain the trailing zeros
     # add them back in if they are missing for consistency in file naming
     if t[-3] == ':':
         t += '.000000'
     return t
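
A worked example of the padding branch: a timestamp with a whole number of seconds yields an isoformat string ending in ':SS', so t[-3] == ':' holds and the zeros are re-added:

from datetime import datetime

t = datetime.isoformat(datetime.utcfromtimestamp(1600000000))   # '2020-09-13T12:26:40'
if t[-3] == ':':
    t += '.000000'
# t == '2020-09-13T12:26:40.000000'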
Example No. 21
    def print_interfaces_removed(object_path, interfaces):
        t = "{}".format(dt.isoformat(dt.now()))
        print("{}: Lost {}:".format(t, _sanitize_dbus_path(object_path)))
        for interface in interfaces:
            if 'block_devices' in object_path:
                # Is it special? (Ram, loopback, optical)
                if 'ram' in object_path:
                    pass  # It's a ramdisk. Do not want.
                elif 'loop' in object_path:
                    pass  # It's a loopback. Do not want.
                elif 'sr' in object_path:
                    pass  # It's an optical. Do not want.
                else:  # It's a normal block_device.
                    if contains_digits(_sanitize_dbus_path(object_path)):  # It's a partition.
                        print("{}: P {}".format(t, _sanitize_dbus_key(interface)))
                        #_print_interfaces_and_properties(interfaces_and_properties)
                    else:  # It's a raw device, what we're looking for!
                        db_remove_disk(db_conn, _extract_dbus_blockpath(object_path))
                        print("{}: B {} from key: {}".format(t, _sanitize_dbus_key(interface), machine_state_uuid))
                        #_print_interfaces_and_properties(interfaces_and_properties)

            elif 'drives' in object_path:
                print("{}: D {}".format(t, _sanitize_dbus_key(interface)))
                #_print_interfaces_and_properties(interfaces_and_properties)
            elif 'jobs' in object_path:
                print("{}: J {}".format(t, _sanitize_dbus_key(interface)))
            else:  # Not a block_device or a drive, eh?
                print("{}: * {}".format(t, _sanitize_dbus_key(interface)))
        sys.stdout.flush()
Example No. 22
def pickle_domain(domain):
    """Pickles a set of boto SimpleDB items, using the pickle protocol #2 for size,
    and speed, returning the filename of the resulting pickle."""
    # We can't pickle the boto Item objects directly, but we can build up a dict
    # containing all the relevant items and their attributes, extracted from the boto Item
    item_dict = {}
    
    item_count = domain.get_metadata().item_count
    ten_percent = max(int(math.floor(item_count / 10)), 1)  # avoid modulo-by-zero when the domain has fewer than 10 items
    i = 0
    for item in domain:
        i = i + 1
        item_dict[item.name] = dict(item)
        if (i % ten_percent == 0):
            sdb_progress_callback(i, item_count)
    
    sdb_progress_callback(i, item_count)
        
    # build a sensible, UTC-timestamped filename, eg: SomeDomain_2011-07-20T22_15_27_839618
    filename = (domain.name + '_' + datetime.isoformat(datetime.utcnow())).replace(':', '_').replace('.', '_')
    
    with codecs.open(filename, 'wb') as f:
        # protocols > 0 write binary
        # in terms of speed: protocol 2 > 1 > 0
        # specifying a protocol version < 0 selects the highest supported protocol
        pickle.dump(item_dict, f, 2)
    
    return filename
Example No. 23
def get_current_time():
    cur_time = re.sub(r'\.[0-9]*$', '+08:00', datetime.isoformat(datetime.now()))
    print('>> current-time: %s' % cur_time)

    response.set_header('Cache-Control', 'no-cache')
    response.content_type = 'application/json'
    return { 'status': 'OK', 'time': cur_time }
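
Note that the regex swap hard-codes a +08:00 offset rather than deriving it from the environment. A timezone-aware sketch that produces the offset directly (assuming the intent really is UTC+8):

from datetime import datetime, timezone, timedelta

cur_time = datetime.now(timezone(timedelta(hours=8))).isoformat()
# e.g. '2024-01-15T17:30:00.123456+08:00'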
Example No. 24
def runStage(*, clientURI, stage, cmd_wrapper):
        ix = stage.ix

        logger.info("Running stage %i (on %s). Memory requested: %.2f", ix, clientURI, stage.mem)
        try:
            command_to_run  = ((cmd_wrapper + ' ') if cmd_wrapper else '') + ' '.join(stage.cmd)
            logger.info(command_to_run)
            command_logfile = stage.log_file

            # log file for the stage
            of = open(command_logfile, 'a')
            of.write("Stage " + str(ix) + " running on " + socket.gethostname()
                     + " (" + clientURI + ") at " + datetime.isoformat(datetime.now(), " ") + ":\n")
            of.write(command_to_run + "\n")
            of.flush()
            
            args = shlex.split(command_to_run)
            process = subprocess.Popen(args, stdout=of, stderr=of, shell=False)
            #client.addPIDtoRunningList(process.pid)
            process.communicate()
            #client.removePIDfromRunningList(process.pid)
            ret = process.returncode 
            of.close()
        except Exception as e:
            logger.exception("Exception whilst running stage: %i (on %s)", ix, clientURI)
            return ix, e
        else:
            logger.info("Stage %i finished, return was: %i (on %s)", ix, ret, clientURI)
            return ix, ret
Example No. 25
def get_time_stamp():
    """
    Return a numeric UTC time stamp without microseconds.
    """
    from datetime import datetime
    return (datetime.isoformat(datetime.utcnow()).split('.')[0]
            .replace('T', '').replace(':', '').replace('-', ''))
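
For example, 2024-01-15 09:30:00 UTC comes out as the compact 14-digit stamp '20240115093000':

from datetime import datetime

stamp = (datetime.isoformat(datetime(2024, 1, 15, 9, 30, 0)).split('.')[0]
         .replace('T', '').replace(':', '').replace('-', ''))
assert stamp == '20240115093000'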
Example No. 26
def put(request, testplan_id):
    """
    Update existing test plan based on testplan_id.
    """
    try:
        in_json = json.loads(request.body)
    except ValueError:
        return HttpResponseBadRequest("invalid JSON")

    dbc = db_model.connect()
    try:
        testplan = dbc.testplan.find_one({"_id": ObjectId(testplan_id)})
    except InvalidId:
        return HttpResponseNotFound()
    if testplan is None:
        return HttpResponseNotFound()
    else:
        if "name" in in_json:
            testplan['name'] = in_json['name']
        if "description" in in_json:
            testplan['description'] = in_json['description']
        if "rules" in in_json:
            testplan['rules'] = [rule_model.validate(rule) for rule in in_json['rules']]
            if None in in_json['rules']:
                return HttpResponse("invalid rule(s) provided")
        try:
            testplan['updatedAt'] = datetime.isoformat(datetime.now())
            dbc.testplan.save(testplan)
        except DuplicateKeyError:
            return HttpResponseBadRequest("testplan named '%s' already exists" % in_json['name'])
        logger.info("test plan '%s' updated by '%s'" % (testplan_id, request.user['username']))
        return HttpResponse(status=200)
Example No. 27
    def build_event_descriptor(site_event):
        event_id = site_event.dr_event.event_id
        modification_number = site_event.dr_event.modification_number

        modification_reason = None
        priority = 1
        test_event = False
        vtn_comment = None

        created_date_time = datetime.isoformat(datetime.now())
        created_date_time = created_date_time[0:-7]

        if site_event.status in ('cancelled', 'CANCELED'):
            event_status = 'cancelled'
        else:
            event_status = site_event.dr_event.status

        return oadr_20b.eventDescriptorType(eventID=event_id,
                                            modificationNumber=modification_number,
                                            modificationReason=modification_reason,
                                            priority=priority,
                                            createdDateTime=created_date_time,
                                            eventStatus=event_status,
                                            testEvent=test_event,
                                            vtnComment=vtn_comment)
Example No. 28
def put(request, rule_id):
    """
    Update existing rule based on rule_id.
    """
    try:
        in_json = json.loads(request.body)
    except ValueError:
        return HttpResponseBadRequest("invalid JSON")

    dbc = db_model.connect()
    try:
        rule = dbc.rule.find_one({"_id": ObjectId(rule_id)})
    except InvalidId:
        return HttpResponseNotFound()
    if rule is None:
        return HttpResponseNotFound()
    else:
        in_json['createdAt'] = rule['createdAt']
        rule = rule_model.validate(in_json)
        if rule is None:
            return HttpResponseBadRequest("invalid rule")
        else:

            rule['_id'] = ObjectId(rule_id)
            rule['updatedAt'] = datetime.isoformat(datetime.now())
            dbc.rule.save(rule)
            r = JsonResponse({"id": rule_id})
            r['location'] = "/api/rule/%s" % rule_id
            logger.info("rule '%s' updated by '%s'" % (rule_id, request.user['username']))
            return r
Example No. 29
    def print_initial_objects(managed_objects):
        t = "{}".format(dt.isoformat(dt.now()))
        print("{}: Known:".format(t))
        for object_path, interfaces_and_properties in managed_objects.items():
            if 'block_devices' in object_path:
                # Is it special? (Ram, loopback, optical)
                if 'ram' in object_path:
                    pass  # It's a ramdisk. Do not want.
                    #print("{}: R {}".format(t, object_path))
                elif 'loop' in object_path:
                    pass  # It's a loopback. Do not want.
                    #print("{}: L {}".format(t, object_path))
                elif 'sr' in object_path:
                    pass  # It's an optical. Do not want.
                    #print("{}: O {}".format(t, object_path))
                else:  # It's a normal block_device.
                    if contains_digits(_sanitize_dbus_path(object_path)):  # It's a partition.
                        print("{}: P {}".format(t, _sanitize_dbus_path(object_path)))
                        #_print_interfaces_and_properties(interfaces_and_properties)
                    else:  # It's a raw device, what we're looking for!
                        db_found_disk(db_conn, _extract_dbus_blockpath(object_path))
                        print("{}: B {} to key: {}".format(t, _sanitize_dbus_path(object_path), machine_state_uuid))
                        #_print_interfaces_and_properties(interfaces_and_properties)

            elif 'drives' in object_path:
                if not _check_property(interfaces_and_properties, 'MediaRemovable'):
                    print("{}: D {}".format(t, _sanitize_dbus_path(object_path)))
                    #_print_interfaces_and_properties(interfaces_and_properties)
            else:  # Not a block_device or a drive, eh?
                print("{}: * {}".format(t, _sanitize_dbus_path(object_path)))
                _print_interfaces_and_properties(interfaces_and_properties)
        sys.stdout.flush()
Example No. 30
def createLogRecord(dictIn):
    # make an event message that can be used
    # for syslog, text, json, etc.
    log = {}
    log['category'] = 'ldapChange'
    log['summary'] = '{0} {1} {2} '.format(dictIn['actor'], dictIn['changetype'], dictIn['dn'])
    log['details'] = dict()
    log['details']['actor'] = dictIn['actor']
    log['details']['changetype'] = dictIn['changetype']
    log['details']['dn'] = dictIn['dn']
    # gather the actions and change lists into pairs of action,value and action:attribute,value
    if 'actions' in dictIn:
        actionpairs = list(zip(dictIn['actions'], dictIn['actions'][1:]))[::2]
        changepairs = list(zip(dictIn['changes'], dictIn['changes'][1:]))[::2]
        log['details']['actionpairs'] = actionpairs
        log['details']['changepairs'] = changepairs

        # what to show in the summary field?
        if ('member' in dictIn['actions']) or 'memberUid' in dictIn['actions']:
            # likely a group membership change (add or delete)
            for a, v in actionpairs:
                if v in ('member', 'memberUid'):
                    for ca, cv in changepairs:
                        if ca == a + ':' + v:
                            if ' {0}: {1} '.format(ca, cv) not in log['summary']:
                                log['summary'] += ' {0}: {1} '.format(ca, cv)
        else:
            # default message logs action pairs
            for action, value in actionpairs:
                log['summary'] += '{0} {1} '.format(action, value)

    log['timestamp'] = datetime.isoformat(datetime.now(tzlocal()))
    return(log)
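
The pairing trick above is worth unpacking: zipping a list against itself shifted by one and keeping every other pair turns a flat action list into consecutive (action, value) tuples:

actions = ['modify', 'member', 'add', 'memberUid']
pairs = list(zip(actions, actions[1:]))[::2]
# pairs == [('modify', 'member'), ('add', 'memberUid')]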
Example No. 31
    def on_message(self, message):
        print("<< WEBSOCKET INPUT")
        dct = json.loads(message)
        logjson(dct, False)
        # if dct.has_key("action"): # python2
        if "action" in dct:  # python3
            print(
                "~~~ device sent action request, acknowledging / answering...")
            if dct['action'] == "register":
                # ITA-GZ1-GL, PSC-B01-GL, etc.
                if "model" in dct and dct["model"]:
                    self.device_model = dct["model"]
                    print("We are dealing with a {} model.".format(
                        self.device_model))
                print("~~~~ register")
                data = {
                    "error": 0,
                    "deviceid": dct['deviceid'],
                    "apikey": self.uuid,
                    "config": {
                        "hb": 1,
                        "hbInterval": 145
                    }
                }
                logjson(data)
                self.write_message(data)
            if dct['action'] == "date":
                print("~~~~ date")
                data = {
                    "error": 0,
                    "deviceid": dct['deviceid'],
                    "apikey": self.uuid,
                    "date": datetime.isoformat(datetime.today())[:-3] + 'Z'
                }
                logjson(data)
                self.write_message(data)
            if dct['action'] == "query":
                print("~~~~ query")
                data = {
                    "error": 0,
                    "deviceid": dct['deviceid'],
                    "apikey": self.uuid,
                    "params": 0
                }
                logjson(data)
                self.write_message(data)
            if dct['action'] == "update":
                print("~~~~ update")
                data = {
                    "error": 0,
                    "deviceid": dct['deviceid'],
                    "apikey": self.uuid
                }
                logjson(data)
                self.write_message(data)
                self.setup_completed = True
        # elif dct.has_key("sequence") and dct.has_key("error"): # python2
        elif "sequence" in dct and "error" in dct:
            print("~~~ device acknowledged our action request (seq {}) "
                  "with error code {}".format(
                      dct['sequence'],
                      dct['error']  # 404 here
                  ))
            if dct['error'] == 404:
                self.upgrade = True
        else:
            print("## MOEP! Unknown request/answer from device!")

        if self.setup_completed and not self.test:
            # switching the relay off and on a few times - for fun and profit!
            for switch_state in ('off', 'on', 'off', 'on', 'off'):
                data = {
                    "action": "update",
                    "deviceid": dct['deviceid'],
                    "apikey": self.uuid,
                    "userAgent": "app",
                    "sequence": str(int(time() * 1000)),
                    "ts": 0,
                    "params": {
                        "switch": switch_state
                    },
                    "from": "hackepeter"
                }
                logjson(data)
                self.write_message(data)
            self.test = True

        if self.setup_completed and self.test and not self.upgrade:

            hash_user1 = self.getFirmwareHash(upgrade_file_user1)
            hash_user2 = self.getFirmwareHash(upgrade_file_user2)

            if hash_user1 and hash_user2:
                udir = 'ota'
                data = {
                    "action": "upgrade",
                    "deviceid": dct['deviceid'],
                    "apikey": self.uuid,
                    "userAgent": "app",
                    "sequence": str(int(time() * 1000)),
                    "ts": 0,
                    "params": {
                        # the device expects two available images, as the original
                        #   firmware splits the flash into two halves and flashes
                        #   the inactive partition (ping-pong).
                        # as we don't know which partition is (in)active, we
                        # provide our custom image as user1 as well as user2.
                        # unfortunately this also means that our firmware image
                        # must not exceed FLASH_SIZE / 2 - (bootloader - spiffs)
                        "binList": [
                            {
                                "downloadUrl":
                                "http://%s:%s/%s/%s" %
                                (myserver, port, udir, upgrade_file_user1),
                                # the device expects and checks the sha256 hash of
                                #   the transmitted file
                                "digest":
                                hash_user1,
                                "name":
                                "user1.bin"
                            },
                            {
                                "downloadUrl":
                                "http://%s:%s/%s/%s" %
                                (myserver, port, udir, upgrade_file_user2),
                                # the device expects and checks the sha256 hash of
                                #   the transmitted file
                                "digest":
                                hash_user2,
                                "name":
                                "user2.bin"
                            }
                        ],
                        # if `model` is set to sth. else (I tried) the websocket
                        #   gets closed in the middle of the JSON transmission
                        "model":
                        self.device_model,
                        # the `version` field doesn't seem to have any effect;
                        #   nevertheless set it to a ridiculously high number
                        #   to always be newer than the existing firmware
                        "version":
                        "23.42.5"
                    }
                }
                logjson(data)
                self.write_message(data)
                self.upgrade = True
Example No. 32
 def isoformat(self):
     dt = self._dt
     return datetime.isoformat(dt)
Example No. 33
def _now():
    """Get a timestamp for right now, formatted according to ISO 8601."""
    return datetime.isoformat(datetime.now())
Example No. 34
def update_kpi(couch_user, password, couch_server):
    """
    Connect to StatusDB projects and flocell databases and 
    compute current KPIs to be added to a new document in the kpi database.
    This script should be run every hour, and it should have a yaml configuration 
    file in '$HOME/.dashbordrc' with program parameters:

        couch_user: foo_user

        password: bar_password

        couch_server: baz_couch.server:port
    """

    couch = Server("http://{}:{}@{}".format(couch_user, password,
                                            couch_server))
    if couch:
        projects_db = couch["projects"]
        worksets_db = couch["worksets"]
        kpi_db = couch["kpi"]
        bioinfo_db = couch["bioinfo_analysis"]
    else:
        raise IOError("Cannot connect to couchdb")

    p_summary = projects_db.view('project/summary')
    p_samples = projects_db.view('project/samples')
    p_dates = projects_db.view('project/summary_dates', group_level=1)
    w_proj = worksets_db.view('project/ws_proj')
    b_samples = bioinfo_db.view('genomics_dashboard/run_lane_sample_status')

    kpis = {}
    kpis["s_initqc"] = SuccessInitialQC()
    kpis["s_libprep"] = SuccessLibraryPrep()
    kpis["s_bioinfo"] = SuccessBioinfo()
    kpis["p_finlib"] = ProjectsFinishedLib()
    kpis["p_libprep"] = ProjectsLibraryPrep()
    kpis["p_inprod"] = ProjectsInProduction()
    kpis["p_inapp"] = ProjectsInApplications()
    kpis["p_oseven"] = ProjectsOpenedLastSeven()
    kpis["p_cseven"] = ProjectsClosedLastSeven()
    kpis["p_onweeks"] = ProjectsOpenedNWeeks()
    kpis["p_cnweeks"] = ProjectsClosedNWeeks()
    kpis["pl_rcsamples"] = LoadInitialQCSamples()
    kpis["pl_rclanes"] = LoadInitialQCLanes()
    kpis["pl_libprepq"] = LoadLibraryPrepQueue()
    kpis["pl_libprep"] = LoadLibraryPrep()
    kpis["pl_bioinfoq"] = LoadBioinfoQueue()
    kpis["pl_bioinfo"] = LoadBioinfo()
    kpis["t_libprep"] = TaTLibprep()
    kpis["t_initqc"] = TaTInitialQC()
    kpis["t_libproj"] = TaTLibprepProj()
    kpis["t_finproj"] = TaTFinlibProj()
    kpis["t_seq"] = TaTSequencing()
    kpis["t_bioinfo"] = TaTBioinformatics()
    kpis["t_libprep_90th"] = TaTLibprep_90th()
    kpis["t_initqc_90th"] = TaTInitialQC_90th()
    kpis["t_libproj_90th"] = TaTLibprepProj_90th()
    kpis["t_finproj_90th"] = TaTFinlibProj_90th()
    kpis["t_seq_90th"] = TaTSequencing_90th()
    kpis["t_bioinfo_90th"] = TaTBioinfo_90th()

    logging.info("Generating KPIs")
    for proj_key, doc in ProjectViewsIter(p_summary, p_samples, p_dates,
                                          w_proj, b_samples):
        logging.debug("Processing project: {}".format(proj_key))
        for kpiobj in kpis.values():
            try:
                kpiobj(doc)
            except:
                logging.debug("Exception in processing {} - {}".format(
                    proj_key, kpiobj.__class__))
                pass

    logging.info("Generating KPIs from lims")
    pl_seq = sequencing_load()

    logging.info("Summarizing KPIs")
    out = {}
    utc_time = "{}+0000".format(datetime.isoformat(datetime.utcnow()))
    limit_file = os.path.join(os.path.dirname(__file__), "config/limits.json")
    out["time_created"] = utc_time
    out["version"] = p_version
    with open(limit_file, "rU") as f:
        out["limits"] = json.load(f)
    #GAH! Too much repetition! I should probably rewrite this part
    out["process_load"] = {
        "initial_qc_samples": kpis["pl_rcsamples"].summary(),
        "initial_qc_lanes": kpis["pl_rclanes"].summary(),
        "library_prep": kpis["pl_libprep"].summary(),
        "library_prep_queue": kpis["pl_libprepq"].summary(),
        "bioinformatics_queue": kpis["pl_bioinfoq"].summary(),
        "bioinformatics": kpis["pl_bioinfo"].summary(),
        "miseq_pooling_queue": pl_seq[0],
        "miseq_sequencing_queue_p": pl_seq[1],
        "miseq_sequencing_queue_l": pl_seq[2],
        "hiseqX_pooling_queue": pl_seq[3],
        "hiseqX_sequencing_queue_p": pl_seq[4],
        "hiseqX_sequencing_queue_l": pl_seq[5],
        "hiseqX_sequencing_l": pl_seq[6],
        "miseq_sequencing_l": pl_seq[7],
        "novaseq_pooling_queue": pl_seq[8],
        "novaseq_sequencing_queue_p": pl_seq[9],
        "novaseq_sequencing_queue_l": pl_seq[10],
        "novaseq_sequencing_l": pl_seq[11]
    }
    out["success_rate"] = {
        "initial_qc": kpis["s_initqc"].summary(),
        "library_prep": kpis["s_libprep"].summary(),
        "sequencing": sequencing_success(),
        "bioinformatics": kpis["s_bioinfo"].summary()
    }
    out["turnaround_times"] = {
        "library_prep": kpis["t_libprep"].summary(),
        "initial_qc": kpis["t_initqc"].summary(),
        "finished_library_project": kpis["t_finproj"].summary(),
        "library_prep_project": kpis["t_libproj"].summary(),
        "bioinformatics": kpis["t_bioinfo"].summary(),
        "sequencing": kpis["t_seq"].summary(),
        "library_prep_90th": kpis["t_libprep_90th"].summary(),
        "initial_qc_90th": kpis["t_initqc_90th"].summary(),
        "finished_library_project_90th": kpis["t_finproj_90th"].summary(),
        "library_prep_project_90th": kpis["t_libproj_90th"].summary(),
        "bioinformatics_90th": kpis["t_bioinfo_90th"].summary(),
        "sequencing_90th": kpis["t_seq_90th"].summary()
    }
    out["projects"] = {
        "opened_last_7_days": kpis["p_oseven"].summary(),
        "in_applications": kpis["p_inapp"].summary(),
        "closed_last_7_days": kpis["p_cseven"].summary(),
        "in_production": kpis["p_inprod"].summary(),
        "opened_n_weeks_ago": kpis["p_onweeks"].summary(),
        "closed_n_weeks_ago": kpis["p_cnweeks"].summary(),
        "finished_libraries": kpis["p_finlib"].summary(),
        "library_prep": kpis["p_libprep"].summary()
    }
    kpi_db.create(out)
Example No. 35
def format_storage_path(instance, filename):
    date = datetime.isoformat(instance.pub_date).split('T')[0]
    project_slug = instance.project.slug
    return 'projects/{0}/posts/{1}/{2}'.format(project_slug, date, filename)
Example No. 36
new_f.login("myriadsdigital", "Password*123")
#print('Files in home directory:---')
#new_f.retrlines('LIST')
#print('\n')


vlist=os.listdir('.')
name=''
for item in vlist:
        match=re.search(r'system\w*', item)
        if match:
                name=match.group()[6:]
print(name)

f = open('locallog.txt','w')
time = datetime.now()
if os.system("ps -e|grep mplayer") == 0 :
    f.write('System  no '+name+'  working  at    '+ datetime.isoformat(time))
else:
    f.write('System  no '+name+' not working  at    '+ datetime.isoformat(time))
f.close()

f = open("locallog.txt", "rb")
new_f.cwd("/log")
new_f.storlines("APPE log.txt", f)
new_f.retrlines('LIST')
print('Log uploaded successfully!\n')
new_f.quit()

f.close()
Example No. 37
class MozDefEvent():
    # create requests session to allow for keep alives
    httpsession = Session()
    # Turns off needless and repetitive .netrc check for creds
    httpsession.trust_env = False
    debug = False
    verify_certificate = True
    # Never fail (i.e. no unexpected exceptions sent to user, such as server/network not responding)
    fire_and_forget_mode = True
    log = {}
    log['timestamp'] = datetime.isoformat(datetime.now())
    log['hostname'] = socket.getfqdn()
    log['processid'] = os.getpid()
    log['processname'] = sys.argv[0]
    log['severity'] = 'INFO'
    log['summary'] = None
    log['category'] = 'event'
    log['tags'] = list()
    log['details'] = dict()

    def __init__(self,
                 url='http://localhost/events',
                 summary=None,
                 category='event',
                 severity='INFO',
                 tags=[],
                 details={}):
        self.summary = summary
        self.category = category
        self.severity = severity
        self.tags = tags
        self.details = details
        self.url = url
        self.hostname = socket.getfqdn()
        self.timestamp = self.log['timestamp']  # fallback used by send() when no timestamp is passed

    def send(self,
             timestamp=None,
             summary=None,
             category=None,
             severity=None,
             tags=None,
             details=None,
             hostname=None):
        log_msg = copy.copy(self.log)

        if timestamp is None:
            log_msg['timestamp'] = self.timestamp
        else:
            log_msg['timestamp'] = timestamp

        if summary is None:
            log_msg['summary'] = self.summary
        else:
            log_msg['summary'] = summary

        if category is None:
            log_msg['category'] = self.category
        else:
            log_msg['category'] = category

        if severity is None:
            log_msg['severity'] = self.severity
        else:
            log_msg['severity'] = severity

        if tags is None:
            log_msg['tags'] = self.tags
        else:
            log_msg['tags'] = tags

        if details is None:
            log_msg['details'] = self.details
        else:
            log_msg['details'] = details

        if hostname is None:
            log_msg['hostname'] = self.hostname
        else:
            log_msg['hostname'] = hostname

        if not isinstance(log_msg['details'], dict):
            raise MozDefError('details must be a dict')
        elif not isinstance(log_msg['tags'], list):
            raise MozDefError('tags must be a list')
        elif log_msg['summary'] is None:
            raise MozDefError('Summary is a required field')

        if self.debug:
            print(json.dumps(log_msg, sort_keys=True, indent=4))
            #return

        try:
            r = self.httpsession.post(self.url,
                                      json.dumps(log_msg),
                                      verify=self.verify_certificate)
        except Exception as e:
            if not self.fire_and_forget_mode:
                raise e
Example No. 38
    def _wiki2git(self):
        revs = self._pending_revs()
        synced_files = []
        for rev in revs:
            #
            # Summary/commit message parsing.
            #
            git_commit_message = rev[1]['comment'] or '*** празно резюме ***'  # Bulgarian for '*** empty summary ***'

            #
            # User parsing.
            #
            wiki_user = rev[1]['user']
            # Ignore our own sync edits in the wiki.
            if wiki_user == self.site.username():
                continue
            try:
                author = self._usermap[wiki_user]['author']
                email = self._usermap[wiki_user]['email']
            except KeyError:
                author = wiki_user
                email = ''
            git_author = git.Actor(author, email)
            git_committer = git.Actor(author, email)

            #
            # Page/file parsing.
            #
            # We cannot have both a file and a directory with the same name, so where we have
            # 'Page' and 'Page/doc', the latter gets converted to 'Page.d/doc'.
            file_name = rev[0].replace('/', '.d/')
            # If we've configured a file extension for syntax highlighting, add it, but only for
            # files in the root of the namespace/repository (the rest will likely be 'Page/doc').
            if self.force_ext and '.d/' not in file_name:
                file_name = file_name + '.' + self.force_ext
            file_path = os.path.join(self.repo.working_dir, file_name)

            #
            # Committing.
            #
            # To avoid conflicts as much as possible, perform git pull right before we apply the
            # change and commit it.
            self._pull()
            if rev[2] in ['edit', 'resync']:
                os.makedirs(os.path.dirname(file_path), exist_ok=True)
                if rev[2] == 'resync' and os.path.exists(file_path):
                    with open(file_path, 'r') as f:
                        if rev[1]['text'] == f.read().rstrip('\n'):
                            # The on-wiki and Git versions are the same. No need to resync.
                            continue
                with open(file_path, 'w') as f:
                    f.write(rev[1]['text'] + '\n')
                self.repo.index.add([file_path])
            elif rev[2] in ['delete', 'move']:
                self.repo.index.remove([file_path], working_tree=True)
            else:
                print('Error: Unknown revision type: "{}"'.format(rev[2]))
                continue
            print('Syncing to Git: {}'.format(file_name))
            self.repo.index.commit(
                    git_commit_message,
                    author=git_author,
                    committer=git_committer,
                    author_date=dt.isoformat(rev[1]['timestamp'], timespec='seconds'),
                    commit_date=dt.isoformat(rev[1]['timestamp'], timespec='seconds'))
            # Push after each commit. It's inefficient, but should minimize possible conflicts.
            self.repo.git.push()
            synced_files.append(file_name)
        return synced_files
Example No. 39
def get_type(in_type):
    if in_type == "RSA":
        return "ssh-rsa"
    else:
        return "ssh-dsa"


gerrit_config = get_broken_config(GERRIT_CONFIG)
secure_config = get_broken_config(GERRIT_SECURE_CONFIG)

DB_USER = gerrit_config.get("database", "username")
DB_PASS = secure_config.get("database", "password")
DB_DB = gerrit_config.get("database", "database")

db_backup_file = "%s.%s.sql" % (DB_DB, datetime.isoformat(datetime.now()))
db_backup_path = os.path.join(GERRIT_BACKUP_PATH, db_backup_file)
if not options.skip_dump:
    log.info('Backup mysql DB to ' + db_backup_path)
    retval = os.system("mysqldump --opt -u%s -p%s %s | gzip -9 > %s.gz" %
                       (DB_USER, DB_PASS, DB_DB, db_backup_path))
    if retval != 0:
        print "Problem taking a db dump, aborting db update"
        sys.exit(retval)

log.info('Connect to mysql DB')
conn = MySQLdb.connect(user=DB_USER, passwd=DB_PASS, db=DB_DB)
cur = conn.cursor()

log.info('Connecting to launchpad')
launchpad = Launchpad.login_with('Gerrit User Sync',
Example No. 40
                return resp
            else:
                username2 = credMsgJS["username"]
                if dbContainsUsername(username2):
                    resp = jsonify({"Auth error": "user name already in use"})
                    resp.status_code = http.HTTPStatus.UNAUTHORIZED
                    return resp
                userToDB(emailS, username2, concatHash)
                username = username2
        else:
            if not login:
                username2 = credMsgJS["username"]
                if username != username2:
                    resp = jsonify({"Auth error": "user name can not be changed"})
                    resp.status_code = http.HTTPStatus.UNAUTHORIZED
                    return resp
        id2loginDate[id2] = datetime.now()
        id2username[id2] = username
        return jsonify({"id": id2, "username": username})
    except Exception as ex:
        resp = jsonify({"error": str(ex)})
        resp.status_code = 401
        return resp


if __name__ == "__main__":
    print("today", datetime.isoformat(datetime.now()))
    app.run(debug=True, use_reloader=False, host="0.0.0.0")

# http://raspberrylan.1qgrvqjevtodmryr.myfritz.net:8080/
Example No. 41
 def __init__(self, msg_id, commenter_id, message):
     self.msg_id = msg_id
     self.commenter_id = commenter_id
     self.message = message
     self.posted_date = datetime.isoformat(datetime.utcnow(), ' ')
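
The second argument here is isoformat's sep parameter, which replaces the default 'T' separator between the date and time parts:

from datetime import datetime

datetime.isoformat(datetime(2024, 1, 15, 9, 30), ' ')   # '2024-01-15 09:30:00'
datetime.isoformat(datetime(2024, 1, 15, 9, 30))        # '2024-01-15T09:30:00'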
Example No. 42
 def create_zun_service(self, values):
     values['created_at'] = datetime.isoformat(timeutils.utcnow())
     zun_service = models.ZunService(values)
     zun_service.save()
     return zun_service
Example No. 43
    def insert_valves(self, datetime=datetime.now(), name='', state=False):
        args = {'datetime': datetime.isoformat(), 'name': name, 'state': state}

        self._set(DatabaseInterface.INSERT_VALVES_STR.format(**args))
Example No. 44
    def insert_pressure(self, datetime=datetime.now(), pressure=0):
        args = {'datetime': datetime.isoformat(), 'pressure': pressure}

        self._set(DatabaseInterface.INSERT_PRESSURE_STR.format(**args))
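
One caveat for both snippets above: a default like datetime=datetime.now() is evaluated once, when the method is defined, so every call that omits the argument reuses that stale timestamp (and the parameter shadows the datetime class). A safer sketch for the same hypothetical DatabaseInterface method:

from datetime import datetime

def insert_pressure(self, when=None, pressure=0):
    when = when if when is not None else datetime.now()  # evaluated per call
    args = {'datetime': when.isoformat(), 'pressure': pressure}
    self._set(DatabaseInterface.INSERT_PRESSURE_STR.format(**args))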
Example No. 45
def time_now():
    now = datetime.now()
    return datetime.isoformat(now)
Example No. 46
def get_utc_isodate():
    return datetime.isoformat(datetime.utcnow())
Example No. 47
    def run(self, **inputs):
        """Execute this interface.

        This interface will not raise an exception if runtime.returncode is
        non-zero.

        Parameters
        ----------
        inputs : allows the interface settings to be updated

        Returns
        -------
        results :  an InterfaceResult object containing a copy of the instance
        that was executed, provenance information and, if successful, results
        """
        from ...utils.profiler import ResourceMonitor

        enable_rm = config.resource_monitor and self.resource_monitor
        force_raise = not getattr(self.inputs, 'ignore_exception', False)
        self.inputs.trait_set(**inputs)
        self._check_mandatory_inputs()
        self._check_version_requirements(self.inputs)
        interface = self.__class__
        self._duecredit_cite()

        # initialize provenance tracking
        store_provenance = str2bool(
            config.get('execution', 'write_provenance', 'false'))
        env = deepcopy(dict(os.environ))
        if self._redirect_x:
            env['DISPLAY'] = config.get_display()

        runtime = Bunch(cwd=os.getcwd(),
                        returncode=None,
                        duration=None,
                        environ=env,
                        startTime=dt.isoformat(dt.utcnow()),
                        endTime=None,
                        platform=platform.platform(),
                        hostname=platform.node(),
                        version=self.version)

        mon_sp = None
        if enable_rm:
            mon_freq = float(
                config.get('execution', 'resource_monitor_frequency', 1))
            proc_pid = os.getpid()
            iflogger.debug(
                'Creating a ResourceMonitor on a %s interface, PID=%d.',
                self.__class__.__name__, proc_pid)
            mon_sp = ResourceMonitor(proc_pid, freq=mon_freq)
            mon_sp.start()

        # Grab inputs now, as they should not change during execution
        inputs = self.inputs.get_traitsfree()
        outputs = None

        try:
            runtime = self._run_interface(runtime)
            outputs = self.aggregate_outputs(runtime)
        except Exception as e:
            import traceback
            # Retrieve the maximum info fast
            runtime.traceback = traceback.format_exc()
            # Gather up the exception arguments and append nipype info.
            exc_args = e.args if getattr(e, 'args') else tuple()
            exc_args += (
                'An exception of type %s occurred while running interface %s.'
                % (type(e).__name__, self.__class__.__name__), )
            if config.get('logging', 'interface_level',
                          'info').lower() == 'debug':
                exc_args += ('Inputs: %s' % str(self.inputs), )

            runtime.traceback_args = ('\n'.join(
                ['%s' % arg for arg in exc_args]), )

            if force_raise:
                raise
        finally:
            # This needs to be done always
            runtime.endTime = dt.isoformat(dt.utcnow())
            timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime)
            runtime.duration = (timediff.days * 86400 + timediff.seconds +
                                timediff.microseconds / 1e6)
            results = InterfaceResult(interface,
                                      runtime,
                                      inputs=inputs,
                                      outputs=outputs,
                                      provenance=None)

            # Add provenance (if required)
            if store_provenance:
                # Provenance will only throw a warning if something went wrong
                results.provenance = write_provenance(results)

            # Make sure runtime profiler is shut down
            if enable_rm:
                import numpy as np
                mon_sp.stop()

                runtime.mem_peak_gb = None
                runtime.cpu_percent = None

                # Read .prof file in and set runtime values
                vals = np.loadtxt(mon_sp.fname, delimiter=',')
                if vals.size:
                    vals = np.atleast_2d(vals)
                    # column 1 is CPU percent, column 2 is RSS in MB
                    # (consistent with prof_dict below)
                    runtime.mem_peak_gb = vals[:, 2].max() / 1024
                    runtime.cpu_percent = vals[:, 1].max()

                    runtime.prof_dict = {
                        'time': vals[:, 0].tolist(),
                        'cpus': vals[:, 1].tolist(),
                        'rss_GiB': (vals[:, 2] / 1024).tolist(),
                        'vms_GiB': (vals[:, 3] / 1024).tolist(),
                    }

        return results
Example #48
def main():
    with open(sys.argv[1], "r") as f:
        conf = json.load(f)

    created = (
      datetime.now(tz=timezone.utc)
      if conf["created"] == "now"
      else datetime.fromisoformat(conf["created"])
    )
    mtime = int(created.timestamp())
    store_dir = conf["store_dir"]

    from_image = load_from_image(conf["from_image"])

    with tarfile.open(mode="w|", fileobj=sys.stdout.buffer) as tar:
        layers = []
        layers.extend(add_base_layers(tar, from_image))

        start = len(layers) + 1
        for num, store_layer in enumerate(conf["store_layers"], start=start):
            print("Creating layer", num, "from paths:", store_layer,
                  file=sys.stderr)
            info = add_layer_dir(tar, store_layer, store_dir, mtime=mtime)
            layers.append(info)

        print("Creating layer", len(layers) + 1, "with customisation...",
              file=sys.stderr)
        layers.append(
          add_customisation_layer(
            tar,
            conf["customisation_layer"],
            mtime=mtime
          )
        )

        print("Adding manifests...", file=sys.stderr)

        image_json = {
            "created": datetime.isoformat(created),
            "architecture": conf["architecture"],
            "os": "linux",
            "config": overlay_base_config(from_image, conf["config"]),
            "rootfs": {
                "diff_ids": [f"sha256:{layer.checksum}" for layer in layers],
                "type": "layers",
            },
            "history": [
                {
                  "created": datetime.isoformat(created),
                  "comment": f"store paths: {layer.paths}"
                }
                for layer in layers
            ],
        }

        image_json = json.dumps(image_json, indent=4).encode("utf-8")
        image_json_checksum = hashlib.sha256(image_json).hexdigest()
        image_json_path = f"{image_json_checksum}.json"
        add_bytes(tar, image_json_path, image_json, mtime=mtime)

        manifest_json = [
            {
                "Config": image_json_path,
                "RepoTags": [conf["repo_tag"]],
                "Layers": [layer.path for layer in layers],
            }
        ]
        manifest_json = json.dumps(manifest_json, indent=4).encode("utf-8")
        add_bytes(tar, "manifest.json", manifest_json, mtime=mtime)

        print("Done.", file=sys.stderr)
Example #49
def main():

    # this is the "Save_Snapshot" feature
    # client_query() # this queries current org and all client information and builds database
    # exit()

    # clientDB = [{'mac': '00:a6:ca:bd:30:34', 'vlan': '9', 'port': 'Gi1/0/48'},..] List
    clientDB = []  # this will hold all the client information (port/vlan/etc) from the org_wide_client output
    #client_load(clientDB)  #this loads meraki "snapshot", doesn't include voice vlans, need to fix that

    msDB = []
    sDir = "cisco/paris_raw2/"
    if allowHistoryConfigs:
        cisco_load(msDB, clientDB, sDir)

    #print(clientDB)

    timestamp = datetime.isoformat(datetime.now())
    log(f'Starting autoMAC at {timestamp}')

    if reset_switches:  # resets all ports on in-scope devices with the tags
        switch_wipe()
        exit()

    # Fire up Meraki API and build DB's
    dashboard = meraki.DashboardAPI(api_key=None,
                                    base_url='https://api.meraki.com/api/v1/',
                                    log_file_prefix=__file__[:-3],
                                    print_console=False)
    loop = True

    # load Port Config default library
    PC = portConfig()

    networks = dashboard.organizations.getOrganizationNetworks(org_id)
    last_changes = []
    loops_change = 0
    while loop:

        print()
        print(
            f'{bcolors.HEADER}**************************** START LOOP *****************************'
        )
        print()

        networks_inscope = []  # target networks
        for n in networks:
            if n['tags'] is not None and 'autoMAC' in n['tags']:
                networks_inscope.append(n)

        online_devices = []
        stats = dashboard.organizations.getOrganizationDevicesStatuses(org_id)
        for s in stats:
            if s['status'] == 'online' or s['status'] == 'alerting':
                online_devices.append(s['serial'])

        switches_inscope = []
        devices_inscope = []
        for n in networks_inscope:
            #devices = dashboard.devices.getNetworkDevices(n['id'])
            devices = dashboard.networks.getNetworkDevices(n['id'])
            if len(devices) == 0:
                continue
            for d in devices:
                if 'tags' in d and tag_switch_TARGET in d['tags']:
                    #dashboard.devices.blinkNetworkDeviceLeds(n['id'], serial=d['serial'], duration=5, duty=10, period=100 )
                    if d['serial'] in online_devices:
                        dashboard.devices.blinkDeviceLeds(serial=d['serial'],
                                                          duration=5,
                                                          duty=10,
                                                          period=100)
                        devices_inscope.append(d)

        print(f'{bcolors.OKBLUE}Networks Inscope:')
        for n in networks_inscope:
            name = n['name']
            oid = n['organizationId']
            nid = n['id']
            oname = dashboard.organizations.getOrganization(oid)['name']
            print(
                f'\t{bcolors.OKBLUE}Network[{bcolors.WARNING}{name}{bcolors.OKBLUE}] Network_ID[{bcolors.WARNING}{nid}{bcolors.OKBLUE}] Org[{bcolors.WARNING}{oname}{bcolors.OKBLUE}] Org_ID[{bcolors.WARNING}{oid}{bcolors.OKBLUE}]'
            )
        print()

        print(f'{bcolors.OKBLUE}Devices Inscope:')
        for d in devices_inscope:
            name = d['name']
            model = d['model']
            nid = d['networkId']
            fw = d['firmware']
            print(
                f'\t{bcolors.OKBLUE}Switch[{bcolors.WARNING}{name}{bcolors.OKBLUE}] Model[{bcolors.WARNING}{model}{bcolors.OKBLUE}] Firmware[{bcolors.WARNING}{fw}{bcolors.OKBLUE}] Network_ID[{bcolors.WARNING}{nid}{bcolors.OKBLUE}]'
            )
        # sets all switches to "primed"
        # switch_wipe(devices_inscope)

        # identify all the ports that we're updating
        ports_inscope = []
        for d in devices_inscope:
            #ports = dashboard.switch_ports.getDeviceSwitchPorts(d['serial'])
            ports = dashboard.switch.getDeviceSwitchPorts(d['serial'])

            serial = d['serial']
            dname = d['name']
            portsIS = 0
            for p in ports:
                if p['tags'] is not None and tag_port_TARGET in p['tags']:
                    newPort = p
                    newPort['serial'] = d['serial']
                    newPort['netId'] = d['networkId']
                    newPort['model'] = d['model']
                    #ports_inscope.append(p)
                    ports_inscope.append(newPort)
                    portsIS += 1
                #else:
                #exit()
            print()
            print(
                f'{bcolors.OKBLUE}Checking switch ports:  Switch [{bcolors.WARNING}{dname}{bcolors.OKBLUE}] SN [{bcolors.WARNING}{serial}{bcolors.OKBLUE}] PortsInscope[{bcolors.WARNING}{portsIS}{bcolors.OKBLUE}]'
            )

        #print("Ports Inscope:")
        #print(ports_inscope)

        print()
        #        print("Current Switch Clients:")
        #        print()

        port_changes = []
        total_clients = 0
        # new network device function, works at network level instead of querying each switch
        for n in networks_inscope:
            netid = n['id']
            clients = dashboard.networks.getNetworkClients(netid,
                                                           perPage=1000,
                                                           total_pages='all')

            print(
                f'{bcolors.OKBLUE}Detected total {bcolors.WARNING}{len(clients)}{bcolors.OKBLUE} in Network[{bcolors.WARNING}{n["name"]}{bcolors.OKBLUE}]'
            )
            total_clients = total_clients + len(clients)
            print(
                f'{bcolors.OKBLUE}TOTAL Clients Detected: {bcolors.WARNING}{total_clients}{bcolors.OKBLUE} in {bcolors.WARNING}ALL{bcolors.OKBLUE} networks'
            )

            for c in clients:  # iterate through the ACTIVE clients on dashboard (target switches)
                if c['status'] == "Offline":
                    continue
                update = False
                serial = c['recentDeviceSerial']
                mac = c['mac']
                vlan = int(c['vlan'])
                ovlan = vlan  # dashboard-reported VLAN, kept for logging below
                if c['switchport'] == 'AGGR/0':
                    continue
                port = int(c['switchport'])  # yes, this is right: it comes from the API call

                # check to see if the MAC is in the source clientDB, if so, return the object
                sourceClient = inDB(mac, clientDB)
                if sourceClient is not None:  # there is a response
                    #   print(f'Client in db[{sourceClient}] MAC[{mac}]')
                    # the dashboard client's VLAN does not match the configured one
                    if int(sourceClient['vlan']) != vlan:
                        vlan = int(sourceClient['vlan'])
                        update = True
                        #print(f'{bcolors.OKGREEN}{c}{bcolors.ENDC}')

                    if isActivePort(serial, port, ports_inscope):

                        print(
                            f'{bcolors.FAIL}VLAN Misconfiguration Client activeVlan[{bcolors.WARNING}{ovlan}{bcolors.FAIL}] OriginalVlan[{bcolors.WARNING}{vlan}{bcolors.FAIL}] Mac[{bcolors.WARNING}{c["mac"]}{bcolors.FAIL}] Manufacturer[{bcolors.WARNING}{c["manufacturer"]}{bcolors.FAIL}] Desc[{bcolors.WARNING}{c["description"]}{bcolors.FAIL}]'
                        )

                        # if there's a change, make an update
                        if update and c['status'] == "Online":  # a change is to be made and the port is up
                            #print(c)
                            print(
                                f'\t{bcolors.OKBLUE}Updating switch[{bcolors.WARNING}{serial}{bcolors.OKBLUE}] client[{bcolors.WARNING}{mac}{bcolors.OKBLUE}] VLAN[{bcolors.WARNING}{vlan}{bcolors.OKBLUE}] PORT[{bcolors.WARNING}{port}{bcolors.OKBLUE}]'
                            )
                            log(f'Updating switch[{serial}] client[{mac}] VLAN[{vlan}] PORT[{port}]'
                                )

                            print()
                            change = [serial, port, vlan, mac]
                            if change not in last_changes:
                                port_changes.append(change)
                                if WRITE:
                                    # only record the change when the script isn't read-only
                                    last_changes.append(change)
                            else:
                                print(
                                    f'\t-{bcolors.FAIL}Duplicate change detected, bypassing{bcolors.OKGREEN}'
                                )
                                loops_change = 0
                            print()
                        elif update:
                            print(
                                f'\t-{bcolors.OKGREEN}Port on switch[{bcolors.WARNING}{serial}{bcolors.OKGREEN}] Port[{bcolors.WARNING}{port}{bcolors.OKGREEN}] should be updated but Status[{bcolors.WARNING}{c["status"]}{bcolors.OKGREEN}]'
                            )
                            log(f'Port on switch[{serial}] Port[{port}] should be updated but Status[{c["status"]}]'
                                )

                            print()

                else:  #if sourceClient is NONE, then it wasn't found, what do we do with unknown clients?
                    #if not c['status']  == 'Online': #make sure it's recent and not something crazy
                    #    print(f'{bcolors.BLINK_FAIL}BARF{bcolors.ENDC}')
                    #    continue
                    switchserial = c['recentDeviceSerial']
                    portnum = c['switchport']
                    if isActivePort(serial, port,
                                    ports_inscope) and allowProfileConfigs:
                        print(
                            f'{bcolors.FAIL}Unknown {bcolors.OKBLUE}client[{bcolors.WARNING}{c["mac"]}{bcolors.OKBLUE}] on Port[{bcolors.WARNING}{port}{bcolors.OKBLUE}] on Serial[{bcolors.WARNING}{serial}{bcolors.OKBLUE}]'
                        )
                        #client = {'id': 'kf8522b', 'mac': 'b0:7d:47:c1:80:92', 'description': 'SEPB07D47C18092', 'ip': '192.168.128.16', 'ip6': None, 'ip6Local': 'fe80:0:0:0:b27d:47ff:fec1:8092', 'user': None, 'firstSeen': '2020-09-08T18:20:47Z', 'lastSeen': '2020-09-26T21:01:08Z', 'manufacturer': 'Cisco Systems', 'os': None, 'recentDeviceSerial': 'Q2MW-MW3X-LG3U', 'recentDeviceName': 'Core B', 'recentDeviceMac': 'ac:17:c8:f7:8b:00', 'ssid': None, 'vlan': 999, 'switchport': '13', 'usage': {'sent': 3176, 'recv': 24}, 'status': 'Online', 'notes': None, 'smInstalled': False, 'groupPolicy8021x': None}
                        #print(f'{bcolors.OKGREEN}{c}')
                        portStats = dashboard.switch.getDeviceSwitchPortsStatuses(
                            c['recentDeviceSerial'])
                        portTMP = PC.findClient(c, portStats)
                        if portTMP is not None:
                            portTMP['portId'] = c['switchport']
                            print(
                                f'\t{bcolors.OKBLUE}Received Default port template:{bcolors.WARNING} {portTMP}'
                            )
                            if WRITE:
                                portTMP['stpGuard'] = portTMP[
                                    'stpGuard'].lower()
                                res = dashboard.switch.updateDeviceSwitchPort(
                                    c['recentDeviceSerial'], **portTMP)
                                log(f'Profiled port[{portTMP}]')
                                log(f'[WRITE] API updateDeviceSwitchPorts')
                            else:
                                print(f'{bcolors.OKGREEN}[READ-ONLY BYPASS]')
                                log(f'[READ-ONLY] API updateDeviceSwitchPorts')

                        else:
                            print(
                                f'\t{bcolors.FAIL}No default template found.... suggest adding one based on the above values{bcolors.OKBLUE}'
                            )

                        print()
                        time.sleep(3)

        if not allowProfileConfigs:
            print()
            print(f'{bcolors.OKGREEN} Auto-Profiles are disabled')

        print()
        print(
            f'{bcolors.OKBLUE}Ports to change: {bcolors.WARNING}{port_changes}{bcolors.OKBLUE}'
        )
        print(
            f'{bcolors.OKBLUE}Last Changes: {bcolors.WARNING}{last_changes}{bcolors.OKBLUE}'
        )
        print()

        if len(port_changes) == 0:
            print(f'{bcolors.OKGREEN} NO CHANGES')
            loops_change += 1
            if loops_change >= 5:  #clear the last change buffer after 5 loops without any modified change
                last_changes = []
                loops_change = 0

        for pc in port_changes:
            S1 = pc[0]
            P1 = pc[1]
            vlan = pc[2]
            mac = pc[3]
            #newPort = dashboard.switch_ports.getDeviceSwitchPort(S1, P1)
            newPort = dashboard.switch.getDeviceSwitchPort(S1, P1)

            if newPort['type'] != 'access':  # short-circuit if it's not an access port
                continue

            #Pull original switch port config
            orig_mac = findMAC(msDB, mac)
            orig_sw = getSW(msDB, orig_mac['name'])
            orig_port = orig_sw.parsedCFG(orig_mac['port'])
            oVlan = vlan
            try:
                oVlan = orig_port['vlan']
            except (KeyError, TypeError):
                print(f'Failure on port: {orig_port} NewPort[{newPort}]')

            if 'voiceVlan' in orig_port:
                ovoiceVlan = orig_port['voiceVlan']
            else:
                ovoiceVlan = None
            oPort = orig_mac['port']
            oName = orig_mac['name']
            stpG = orig_port['stpGuard']
            print(
                f'\t{bcolors.OKGREEN}Original port config found: vlan[{bcolors.WARNING}{oVlan}{bcolors.OKGREEN}] voice[{bcolors.WARNING}{ovoiceVlan}{bcolors.OKGREEN}] port[{bcolors.WARNING}{oPort}{bcolors.OKGREEN}] stpGuard[{bcolors.WARNING}{stpG}{bcolors.OKGREEN}] Switch[{bcolors.WARNING}{oName}{bcolors.OKGREEN}]'
            )

            if tag_port_AUTO in newPort['tags']:
                tags = tag_port_TARGET + " " + tag_port_AUTO
            else:
                tags = tag_port_DONE

            #make tags an array
            tags = tags.split(' ')
            #if the access vlan or voice vlan mismatches, configure the port
            if str(newPort['vlan']) != oVlan or str(newPort['voiceVlan']) != ovoiceVlan:
                print(
                    f'\t\t{bcolors.OKGREEN}Changing Switch[{bcolors.WARNING}{S1}{bcolors.OKGREEN}] Port[{bcolors.WARNING}{P1}{bcolors.OKGREEN}] to VLAN[{bcolors.WARNING}{oVlan}{bcolors.OKGREEN}] VoiceVlan[{bcolors.WARNING}{ovoiceVlan}{bcolors.OKGREEN}] stpGuard[{bcolors.WARNING}{stpG}{bcolors.OKGREEN}]'
                )
                log(f'Changing Switch[{S1}] Port[{P1}] to VLAN[{oVlan}] VoiceVlan[{ovoiceVlan}] stpGuard[{stpG}]'
                    )

                #res = dashboard.switch_ports.updateDeviceSwitchPort(S1, P1, vlan=oVlan, voiceVlan=ovoiceVlan, tags=tags, isolationEnabled=False, stpGuard=stpG)
                if WRITE:
                    res = dashboard.switch.updateDeviceSwitchPort(
                        S1,
                        P1,
                        vlan=oVlan,
                        voiceVlan=ovoiceVlan,
                        tags=tags,
                        isolationEnabled=False,
                        stpGuard=stpG)
                    log(f'[WRITE] API updateDeviceSwitchPorts')
                else:
                    print(f'{bcolors.OKGREEN}[READ-ONLY BYPASS]')
                    log(f'[READ-ONLY] API updateDeviceSwitchPorts')

            else:
                print(
                    f'{bcolors.FAIL}Port is already configured{bcolors.WARNING}!!!!! Clearing port tag{bcolors.OKBLUE}'
                )
                if WRITE:
                    res = dashboard.switch.updateDeviceSwitchPort(S1,
                                                                  P1,
                                                                  tags=tags)
                    log(f'[WRITE] API updateDeviceSwitchPorts')
                else:
                    print(f'{bcolors.OKGREEN}[READ-ONLY BYPASS]')
                    log(f'[READ-ONLY] API updateDeviceSwitchPorts')

            print()

        print()
        print(
            f'{bcolors.HEADER}**************************** END LOOP *****************************'
        )
        print()

        print(f'{bcolors.OKGREEN}Sleep mode.....')
        time.sleep(30)
        print()
        print()
Example #50
def get_sigmf_iso8601_datetime_now():
    return datetime.isoformat(datetime.utcnow()) + 'Z'
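
Because datetime.utcnow() is naive, the snippet appends 'Z' by hand to mark UTC. A sketch of an aware equivalent that yields the same trailing 'Z' (the function name is ours):

from datetime import datetime, timezone

def get_sigmf_iso8601_datetime_now_aware():
    # isoformat() on an aware UTC datetime ends in '+00:00';
    # swap that for the 'Z' suffix used above.
    return datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')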
Example #51
def main():
    """Run main function."""

    args = argument_parsing()

    fast = not args['no_fast']
    timeout = args['timeout']
    verbose = args['verbose']
    cycles = args['cycles']

    logging_level = logging.WARNING

    if verbose:
        logging_level = logging.INFO

    if args['logging']:
        logging_level = logging.DEBUG

    logging.basicConfig(stream=sys.stdout, level=logging_level)
    obd.logger.setLevel(logging_level)

    logging.info(f"argument --fast: {fast}")
    logging.info(f"argument --timeout: {timeout}")
    logging.info(f"argument --verbose: {verbose}")
    logging.info(f"argument --cycles: {cycles}")
    logging.info(f"argument --logging: {args['logging']} ")
    logging.debug("debug logging enabled")

    connection = get_obd_connection(fast=fast, timeout=timeout)

    elm_version, elm_voltage = get_elm_info(connection)
    logging.info("ELM VERSION: {elm_version}, ELM VOLTAGE: {elm_voltage}")

    custom_commands = load_custom_commands(connection)

    vin = get_vin_from_vehicle(connection)
    logging.info(f"VIN: {vin}")

    base_path = args['base_path']

    output_file_path = (get_directory(
        base_path, vin)) / (get_output_file_name(vin + '-TEST'))
    logging.info(f"output file: {output_file_path}")
    with open(output_file_path, mode='w', encoding='utf-8') as out_file:
        for cycle in range(cycles):
            logging.info(f"cycle {cycle} in {cycles}")
            for command_name in get_command_list():
                logging.info(f"command_name {command_name}")

                iso_format_pre = datetime.isoformat(
                    datetime.now(tz=timezone.utc))

                # ensure obd_response is defined even when the command raises
                obd_response = None

                try:
                    obd_response = execute_obd_command(connection,
                                                       command_name)

                except OffsetUnitCalculusError as e:
                    logging.exception(
                        f"Excpetion: {e.__class__.__name__}: {e}")
                    logging.exception(
                        f"OffsetUnitCalculusError on {command_name}, decoder must be fixed"
                    )
                    logging.exception(f"Exception: {e}")

                except Exception as e:
                    logging.exception(f"Exception: {e}")
                    if not connection.is_connected():
                        logging.error(
                            f"connection failure on {command_name}, reconnecting"
                        )
                        connection.close()
                        connection = get_obd_connection(fast=fast,
                                                        timeout=timeout)

                iso_format_post = datetime.isoformat(
                    datetime.now(tz=timezone.utc))

                obd_response_value = clean_obd_query_response(
                    command_name, obd_response)

                logging.info(
                    f"saving: {command_name}, {obd_response_value}, {iso_format_pre}, {iso_format_post}"
                )

                out_file.write(
                    json.dumps({
                        'command_name': command_name,
                        'obd_response_value': obd_response_value,
                        'iso_ts_pre': iso_format_pre,
                        'iso_ts_post': iso_format_post,
                    }) + "\n")
Example #52
    def capture(self):
        """
        The current state of all processes of a given user.
        By default, the current user is taken and analyzed.
        """
        if self._totals is None:
            self.totals()
        if self._totals is None:
            return {"error": "no totals available"}

        from time import sleep

        self.now = now = datetime.utcnow().replace(tzinfo=utc)
        cpu_pct_sum = 0.0
        cpu_time_sum = 0.0

        if self._calc_tree:
            # used to build the process tree
            par_ch = defaultdict(list)
        procs = []
        # sum up process categories
        proc_stats = defaultdict(lambda: defaultdict(lambda: 0.0))
        # reset all instance counters to 0
        for proc_class in CATEGORY:
            proc_stats[proc_class]["instances"] = 0

        # cpu_percent needs to be called twice for meaningful values
        for p in self.user_processes():
            p.cpu_percent()
        sleep(self.sample_interval)

        def check(fn):
            try:
                return fn()
            except ps.AccessDenied:
                return None

        for p in self.user_processes():
            io = check(p.io_counters)
            mem = p.memory_info_ex()

            # relative cpu time usage
            cpu_times = p.cpu_times()
            time_rel = cpu_times.user + cpu_times.system

            # absolute cpu time usage
            start = datetime.fromtimestamp(p.create_time()).replace(tzinfo=utc)
            time_abs = (now - start).total_seconds()

            # memory in pct of cgroup limit, excluding swap.
            # i.e. a value near or above 100% indicates excessive usage
            if not "error" in self._totals["mem"]:
                mem_pct = 100. * mem.rss / KBMB**2 / self._totals["mem"][
                    "mem_max"]
            else:
                mem_pct = 0.

            proc_class = classify_proc(p)
            proc_stats[proc_class]["instances"] += 1
            proc_stats[proc_class]["cpu"] += p.cpu_percent()
            proc_stats[proc_class]["mem"] += mem_pct
            proc_stats[proc_class]["time"] += time_rel

            if self._calc_tree:
                for chpid in [ch.pid for ch in p.children()]:
                    par_ch[p.pid].append(chpid)

            procs.append({
                "pid": p.pid,
                # funny thing: name, path and cmdline can be unequal
                "name": p.name(),
                # The process executable as an absolute path.
                "path": check(p.exe),
                "category": proc_class,
                "command_line": p.cmdline(),
                "open_files": check(p.num_fds),
                #"threads": p.threads(),
                "read": io.read_bytes if io else 0,
                "write": io.write_bytes if io else 0,
                "cpu_percent": p.cpu_percent(),
                "time": {
                    "started": datetime.isoformat(start),
                    "absolute": time_abs,
                    "absolute_h": secs2hms(time_abs),
                    "used": time_rel,
                    "used_h": secs2hms(time_rel),
                    "percent": 100. * time_rel / time_abs,
                },
                "memory": {
                    "real": mem.rss / KBMB**2,
                    "virtual": mem.vms / KBMB**2,
                    "shared": mem.shared / KBMB**2,
                    "percent": 100. * mem_pct,
                }
            })

        if self._calc_tree:
            tree = defaultdict(dict)
            for par, chlds in par_ch.items():
                for ch in chlds:
                    tree[par][ch] = tree[ch]

            roots = set(tree.keys())
            for ch in tree.values():
                for p in ch.keys():
                    roots.remove(p)
            self._tree = [{r: tree[r]} for r in roots]

        self._procs = procs
        for c in proc_stats:  # type for instance counter is 'int'
            proc_stats[c]["instances"] = int(proc_stats[c]["instances"])
        self._proc_stats = proc_stats
        return self._procs, self._tree, self._proc_stats
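
The process tree in capture() is built with a defaultdict trick: linking each child's (initially empty) dict under its parent nests the whole tree in one pass. A standalone demo of that logic:

from collections import defaultdict

tree = defaultdict(dict)
par_ch = {1: [2, 3], 2: [4]}          # parent pid -> child pids
for par, chlds in par_ch.items():
    for ch in chlds:
        tree[par][ch] = tree[ch]      # accessing tree[ch] creates the child node
children = {c for chs in tree.values() for c in chs}
roots = set(tree.keys()) - children
print([{r: dict(tree[r])} for r in roots])  # [{1: {2: {4: {}}, 3: {}}}]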
Example #53
    def run(self, cwd=None, ignore_exception=None, **inputs):
        """Execute this interface.

        This interface will not raise an exception if runtime.returncode is
        non-zero.

        Parameters
        ----------
        cwd : specify a folder where the interface should be run
        inputs : allows the interface settings to be updated

        Returns
        -------
        results :  :obj:`nipype.interfaces.base.support.InterfaceResult`
            A copy of the instance that was executed, provenance information and,
            if successful, results

        """
        from ...utils.profiler import ResourceMonitor

        # if ignore_exception is not provided, taking self.ignore_exception
        if ignore_exception is None:
            ignore_exception = self.ignore_exception

        # Tear-up: get current and prev directories
        syscwd = rgetcwd(error=False)  # Recover when wd does not exist
        if cwd is None:
            cwd = syscwd

        os.chdir(cwd)  # Change to the interface wd

        enable_rm = config.resource_monitor and self.resource_monitor
        self.inputs.trait_set(**inputs)
        self._check_mandatory_inputs()
        self._check_version_requirements(self.inputs)
        interface = self.__class__
        self._duecredit_cite()

        # initialize provenance tracking
        store_provenance = str2bool(
            config.get("execution", "write_provenance", "false"))
        env = deepcopy(dict(os.environ))
        if self._redirect_x:
            env["DISPLAY"] = config.get_display()

        runtime = Bunch(
            cwd=cwd,
            prevcwd=syscwd,
            returncode=None,
            duration=None,
            environ=env,
            startTime=dt.isoformat(dt.utcnow()),
            endTime=None,
            platform=platform.platform(),
            hostname=platform.node(),
            version=self.version,
        )
        runtime_attrs = set(runtime.dictcopy())

        mon_sp = None
        if enable_rm:
            mon_freq = float(
                config.get("execution", "resource_monitor_frequency", 1))
            proc_pid = os.getpid()
            iflogger.debug(
                "Creating a ResourceMonitor on a %s interface, PID=%d.",
                self.__class__.__name__,
                proc_pid,
            )
            mon_sp = ResourceMonitor(proc_pid, freq=mon_freq)
            mon_sp.start()

        # Grab inputs now, as they should not change during execution
        inputs = self.inputs.get_traitsfree()
        outputs = None

        try:
            runtime = self._pre_run_hook(runtime)
            runtime = self._run_interface(runtime)
            runtime = self._post_run_hook(runtime)
            outputs = self.aggregate_outputs(runtime)
        except Exception as e:
            import traceback

            # Retrieve the maximum info fast
            runtime.traceback = traceback.format_exc()
            # Gather up the exception arguments and append nipype info.
            exc_args = e.args if getattr(e, "args") else tuple()
            exc_args += (
                "An exception of type %s occurred while running interface %s."
                % (type(e).__name__, self.__class__.__name__), )
            if config.get("logging", "interface_level",
                          "info").lower() == "debug":
                exc_args += ("Inputs: %s" % str(self.inputs), )

            runtime.traceback_args = ("\n".join(
                ["%s" % arg for arg in exc_args]), )

            if not ignore_exception:
                raise
        finally:
            if runtime is None or runtime_attrs - set(runtime.dictcopy()):
                raise RuntimeError("{} interface failed to return valid "
                                   "runtime object".format(
                                       interface.__class__.__name__))
            # This needs to be done always
            runtime.endTime = dt.isoformat(dt.utcnow())
            timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime)
            runtime.duration = (timediff.days * 86400 + timediff.seconds +
                                timediff.microseconds / 1e6)
            results = InterfaceResult(interface,
                                      runtime,
                                      inputs=inputs,
                                      outputs=outputs,
                                      provenance=None)

            # Add provenance (if required)
            if store_provenance:
                # Provenance will only throw a warning if something went wrong
                results.provenance = write_provenance(results)

            # Make sure runtime profiler is shut down
            if enable_rm:
                import numpy as np

                mon_sp.stop()

                runtime.mem_peak_gb = None
                runtime.cpu_percent = None

                # Read .prof file in and set runtime values
                vals = np.loadtxt(mon_sp.fname, delimiter=",")
                if vals.size:
                    vals = np.atleast_2d(vals)
                    runtime.mem_peak_gb = vals[:, 2].max() / 1024
                    runtime.cpu_percent = vals[:, 1].max()

                    runtime.prof_dict = {
                        "time": vals[:, 0].tolist(),
                        "cpus": vals[:, 1].tolist(),
                        "rss_GiB": (vals[:, 2] / 1024).tolist(),
                        "vms_GiB": (vals[:, 3] / 1024).tolist(),
                    }
            os.chdir(syscwd)

        return results
Example #54
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime

from ceilometer.image import notifications
from ceilometer import sample
from ceilometer.openstack.common import test


def fake_uuid(x):
    return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)


NOW = datetime.isoformat(datetime.utcnow())

NOTIFICATION_SEND = {
    u'event_type': u'image.send',
    u'timestamp': NOW,
    u'message_id': fake_uuid('a'),
    u'priority': u'INFO',
    u'publisher_id': u'images.example.com',
    u'payload': {
        u'receiver_tenant_id': fake_uuid('b'),
        u'destination_ip': u'1.2.3.4',
        u'bytes_sent': 42,
        u'image_id': fake_uuid('c'),
        u'receiver_user_id': fake_uuid('d'),
        u'owner_id': fake_uuid('e')
    }
}
Example #55
# If you add more constituents, you will need to expand this dict

UStop = 49.3457868  # north lat
USleft = -124.7844079  # west long
USright = -66.9513812  # east long
USbottom = 24.7433195  # south lat
#US Lat and long coordinates

WDMFileName = 'MetData_20200720.wdm'
wdm.createnewwdm(WDMFileName, overwrite=True)
index = 1
from datetime import datetime
with open("MetLog.txt", 'w') as Logfile:
    Logfile.write("Started Downloading the data at " +
                  datetime.isoformat(datetime.now()) + " and saving in " +
                  WDMFileName + "\n")
    for station in StationList:
        # Going through Each Station in the list
        TimeZoneAdjustment = station[3]
        Logfile.write("Station: " + station[0] + ", Latitude: " +
                      str(station[1]) + ", Longitude: " + str(station[2]) +
                      ", TimeZoneAdjustment: " + str(TimeZoneAdjustment) +
                      "\n")

        for const in Constituent:
            #Going through each constituent
            if station[1]<UStop and station[1]>USbottom and \
                station[2]>USleft and station[2]<USright:
                LDAS_variable = NLDAS_ConstituentDetails[const][2]
                TimeStep = 1
Example #56
class AnnoSyncFactory(Memoizer, DbQueryFactory):
    log = makeSimpleLogger('scibot.db.sync')
    convert = (lambda d: datetime.isoformat(d) + '+00:00', )  # FIXME hack
    query = 'SELECT updated FROM annotation'
    condition = 'WHERE groupid = :groupid ORDER BY updated DESC LIMIT 1'  # default condition

    def __init__(self,
                 api_token=config.api_token,
                 username=config.username,
                 group=config.group,
                 memoization_file=None,
                 condition=''):
        super().__init__(memoization_file,
                         api_token=api_token,
                         username=username,
                         group=group)
        if condition:
            self.condition = condition

    def __call__(self):
        """ block upstream call which does undesirable things """
        raise NotImplementedError

    def get_api_rows(self, search_after=None, stop_at=None):
        try:
            if self.group == '__world__':
                self.condition = 'WHERE groupid = :groupid AND userid = :userid ORDER BY updated DESC LIMIT 1'
                userid = f'acct:{self.username}@hypothes.is'  # FIXME other registration authorities
                last_updated = next(
                    self.execute(params={
                        'groupid': self.group,
                        'userid': userid
                    })).updated
            else:
                last_updated = next(
                    self.execute(params={'groupid': self.group})).updated
            self.log.debug(f'last updated at {last_updated} for {self.group}')
        except StopIteration:
            last_updated = None
            self.log.debug(f'no annotations in database for {self.group}')

        if self.memoization_file is None:
            rows = list(
                self.yield_from_api(search_after=last_updated,
                                    stop_at=stop_at))
        else:
            if last_updated:
                rows = [
                    a._row for a in self.get_annos()
                    if a.updated > last_updated
                ]
            else:
                rows = [a._row for a in self.get_annos()]

        return rows

    def sync_annos(self,
                   search_after=None,
                   stop_at=None,
                   api_rows=None,
                   check=False):
        """ batch sync """

        if not api_rows:
            # TODO stream this using generators?
            api_rows = self.get_api_rows(search_after, stop_at)
            if not api_rows:
                self.log.info(f'all annotations are up to date')
                return

        anno_records = [quickload(r) for r in api_rows]

        #qsql = 'SELECT distinct(id, updated) FROM annotation WHERE groupid=:groupid'  # makes it a string :/
        qsql = 'SELECT id, updated, document_id FROM annotation WHERE groupid=:groupid'
        params = dict(groupid=api_rows[0]['group'])
        existing = self.session.execute(qsql, params)
        dext = {
            _get_urlsafe_from_hex(id.hex): (up, did)
            for id, up, did in existing
        }
        dupes = [(a, dext[a['id']][0].isoformat() + '+00:00')
                 for a in anno_records if a['id'] in dext]
        maybe_update = [a['id'] for a, u in dupes if a['updated'] > u]
        assert len(dupes) == len(maybe_update)
        #to_update = tuple(_get_hex_from_urlsafe(i) i for i in maybe_update)
        to_delete = {f'id{i}': v for i, v in enumerate(maybe_update)}
        if to_delete:
            names_or = ' OR '.join(f'id = :{p}' for p in to_delete)
            _dsql = text(f'DELETE FROM annotation WHERE {names_or}')
            bindparams = tuple(
                bindparam(name, type_=URLSafeUUID) for name in to_delete)
            dsql = _dsql.bindparams(*bindparams)
            # delete to avoid collisions, they will be added again later and
            # then finalized when the transaction finishes
            self.session.execute(dsql, to_delete)

        self.log.debug(f'quickload complete for {len(api_rows)} api_rows')

        anno_id_to_doc_id = self.q_create_docs(api_rows)
        self.q_create_annos(anno_records, anno_id_to_doc_id)

        def do_check():
            api_rows  # so that it is accessible in function scope
            self.log.debug('checking for consistency')
            annos = self.session.query(models.Annotation).\
                filter(models.Annotation.groupid == self.group).all()
            #docs = self.session.query(models.Document).all()
            durs = self.session.query(models.DocumentURI).all()
            doc_uris = defaultdict(set)
            _ = [doc_uris[d.document_id].add(d.uri) for d in durs]
            doc_uris = dict(doc_uris)
            #dms = self.session.query(models.DocumentMeta).all()
            #doc_mismatch = [a for a in annos if anno_id_to_doc_id[a.id] != a.document.id]  # super slow due to orm fetches
            doc_missing = [a for a in annos if a.id not in anno_id_to_doc_id]
            assert not doc_missing
            doc_mismatch = [
                a for a in annos if anno_id_to_doc_id[a.id] != a.document_id
            ]
            assert not doc_mismatch, doc_mismatch
            # don't use the orm to do this, it is too slow even if you send the other queries above
            #embed()
            uri_mismatch = [(a.target_uri, doc_uris[a.document_id], a)
                            for a in annos
                            if a.target_uri not in doc_uris[a.document_id]]
            # NOTE hypothesis only allows 1 record per normalized uri, so we have to normalize here as well
            maybe_mismatch = set(
                frozenset(s) for u, s, a in uri_mismatch if not s.add(u))
            h_mismatch = set(s for s in maybe_mismatch
                             if len(frozenset(uri_normalize(u)
                                              for u in s)) > 1)
            self.log.debug(f'h mismatch has {len(h_mismatch)} cases')
            # the above normalization is not sufficient for cases where there are two
            # hypothes.is normalized uris AND a scibot normalized uri as well
            super_mismatch = set(
                s for s in h_mismatch
                if len(frozenset(uri_normalization(u) for u in s)) > 1)
            assert not super_mismatch, super_mismatch

        if check:
            self.session.flush()  # have to run this to get the doc ids to work?
            do_check()

            self.session.commit()
            self.log.debug('commit done')
        else:
            embed()

    def q_create_annos(self, anno_records, anno_id_to_doc_id):
        # NOTE values_sets adds the document_id field and
        # so self.types must be called after values_sets completes
        values_sets = tuple(self.values_sets(anno_records, anno_id_to_doc_id))
        *values_templates, values, bindparams = makeParamsValues(
            *values_sets, types=self.types(anno_records))
        rec_keys = self.get_rec_keys(anno_records)
        sql = text(
            f'INSERT INTO annotation ({", ".join(rec_keys)}) VALUES {", ".join(values_templates)}'
        )
        sql = sql.bindparams(*bindparams)

        def debug_type(column):
            # FIXME column name collisions
            col = models.Annotation.__table__.columns[column]
            ctype = col.type.python_type
            ind = rec_keys.index(column)
            for values, in values_sets:
                if type(values[ind]) != ctype:
                    print('ERROR IN ', values)

        def debug_templates(column):
            col = models.Annotation.__table__.columns[column]
            ctype = col.type.python_type
            for t in values_templates:
                for k, ws_c_vn_ws in zip(rec_keys,
                                         t.strip('(').rstrip(')').split(',')):
                    vn = ws_c_vn_ws.strip().rstrip().strip(':')
                    v = values[vn]
                    if k == column and type(v) != ctype:
                        print('ERROR IN', t)

        try:
            self.session.execute(sql, values)
            self.log.debug('anno execute done')
        except BaseException as e:
            self.log.error('YOU ARE IN ERROR SPACE')
            embed()

        self.session.flush()
        self.log.debug('anno flush done')

    def get_rec_keys(self, anno_records):
        def fix_reserved(k):
            if k == 'references':
                k = '"references"'

            return k

        return [fix_reserved(k) for k in anno_records[0].keys()]

    def values_sets(self, anno_records, anno_id_to_doc_id):
        def type_fix(k, v):  # TODO is this faster or is type_fix?
            if isinstance(v, dict):
                return json.dumps(v)  # FIXME perf?
            elif isinstance(v, list):
                if any(isinstance(e, dict) for e in v):
                    return json.dumps(v)  # FIXME perf?
            return v

        def make_vs(d):
            id = d['id']
            document_id = anno_id_to_doc_id[id]
            d['document_id'] = document_id
            # FIXME does ordering matter here!?
            return [type_fix(k, v) for k, v in d.items()
                    ],  # don't miss the , to make this a value set

        yield from (make_vs(d) for d in anno_records)
        self.log.debug('anno values sets done')

    def types(self, datas):
        def make_types(d):
            def inner(k):
                if k == 'id':
                    return URLSafeUUID
                elif k == 'references':
                    return ARRAY(URLSafeUUID)
                else:
                    return None

            return [inner(k) for k in d]

        yield from (make_types(d) for d in datas)

    @staticmethod
    def uri_records(row):
        uri = row['uri']
        return uri, uri_normalization(uri), quickuri(row)

    def q_prepare_docs(self, rows):
        existing_unnormed = {
            r.uri: (r.document_id, self.convert[0](r.created),
                    self.convert[0](r.updated))
            for r in self.session.execute('SELECT uri, document_id, created, '
                                          'updated FROM document_uri')
        }
        created_updated = {
            docid: (created, updated)
            for _, (docid, created, updated) in existing_unnormed.items()
        }
        _existing = defaultdict(set)
        _ = [
            _existing[uri_normalization(uri)].add(docid)
            for uri, (docid, created, updated) in existing_unnormed.items()
        ]
        # TODO proper handling for this case
        assert not [v for v in _existing.values() if len(v) > 1]
        h_existing_unnormed = {
            uri_normalize(uri): docid
            for uri, (docid, created, updated) in existing_unnormed.items()
        }
        existing = {k: next(iter(v))
                    for k, v in _existing.items()
                    }  # FIXME issues when things get big
        latest_existing = max(
            u
            for c, u in created_updated.values()) if created_updated else None

        new_docs = {
        }  # FIXME this is completely opaque since it is not persisted anywhere
        for row in sorted(rows, key=lambda r: r['created']):
            id = row['id']
            uri, uri_normed, (created, updated, claims) = self.uri_records(row)
            try:
                docid = existing[uri_normed]
                dc, du = created_updated[docid]
                doc = models.Document(id=docid, created=dc, updated=du)
                if doc.updated < updated:
                    # FIXME TODO update the record?
                    #self.log.warning('YOU ARE NOT UPDATING A DOC WHEN YOU SHOULD!!!!!!\n'
                    #f'{docid} {doc.updated} {updated}')
                    pass

                do_claims = False
            except KeyError as e:
                if existing:
                    if row['updated'] <= latest_existing:
                        # only need to worry if we are recreating
                        raise e
                if uri_normed not in new_docs:
                    do_claims = True
                    doc = models.Document(created=created, updated=updated)
                    self.session.add(doc)  # TODO perf testing vs add_all
                    new_docs[uri_normed] = doc
                else:
                    do_claims = False
                    doc = new_docs[uri_normed]

            #if type(doc.created) == str:
            #embed()
            yield id, doc

            if uri_normalize(uri) not in h_existing_unnormed:
                # NOTE allowing only the normalized uri can cause confusion (i.e. see checks in sync_annos)
                h_existing_unnormed[uri_normalize(uri)] = doc
                # TODO do these get added automatically if their doc gets added but exists?
                doc_uri = models.DocumentURI(document=doc,
                                             claimant=uri,
                                             uri=uri,
                                             type='self-claim',
                                             created=created,
                                             updated=updated)
                yield None, doc_uri

            # because of how this schema is designed
            # the only way that this can be fast is
            # if we assume that all claims are identical
            # FIXME if there is a new claim type then we are toast though :/
            # the modelling here assumes that title etc can't change
            #print(id, uri, uri_normed, row['user'], row['uri'], row['created'])
            if do_claims:
                for claim in claims:
                    #print(id, uri, uri_normed, claim['claimant'], claim['type'], claim['value'])
                    dm = models.DocumentMeta(document=doc,
                                             created=created,
                                             updated=updated,
                                             **claim)
                    yield None, dm

    def q_create_docs(self, rows):
        ids_docs = list(self.q_prepare_docs(rows))
        docs = sorted(set(d for i, d in ids_docs if i),
                      key=lambda d: d.created)
        uri_meta = list(d for i, d in ids_docs if not i)
        assert len(uri_meta) == len(set(uri_meta))

        # TODO skip the ones with document ids
        self.insert_bulk(docs, {'document': ['created', 'updated']})

        for um in uri_meta:
            um.document_id = um.document.id
            um.document = None
            del um.document  # have to have this or document overrides document_id

        self.insert_bulk(uri_meta)
        self.session.expunge_all()  # prevent attempts to add unpersisted objects
        self.session.flush()
        self.log.debug('finished inserting docs')
        anno_id_to_doc_id = {i: d.id for i, d in ids_docs}
        return anno_id_to_doc_id

    def sync_anno_stream(self, search_after=None, stop_at=None):
        """ streaming one anno at a time version of sync """
        for row in self.yield_from_api(search_after=search_after,
                                       stop_at=stop_at):
            yield row, 'TODO'
            continue
            # TODO
            datum = validate(row)  # roughly 30x slower than quickload
            # the h code I'm calling assumes these are new annos
            datum['id'] = row['id']
            datum['created'] = row['created']
            datum['updated'] = row['updated']
            document_dict = datum.pop('document')
            document_uri_dicts = document_dict['document_uri_dicts']
            document_meta_dicts = document_dict['document_meta_dicts']
            a = [
                models.Annotation(**d,
                                  document_id=dbdocs[uri_normalize(
                                      d['target_uri'])].id) for d in datas
            ]  # slow
            self.log.debug('making annotations')
            self.session.add_all(a)
            self.log.debug('adding all annotations')
Example #57
 def start(self):
     self.open = True
     self.time = datetime.now()
     self.timestamp = datetime.isoformat(self.time)
     return self
Example #58
from datetime import datetime, timedelta

import pytest
from app import create_app
from app.db import connect_to_db
from app.db import db
from app.enums import FlaskEnv
from app.models.course_model import Course
from app.models.review_quote_model import ReviewQuote
from app.models.talk_model import Talk
from app.models.user_model import User
from app.utilities.init_db import create_db
from pytz import utc

# from app.utilities.init_db import drop_db

future_iso_date = datetime.isoformat(datetime.now(utc) + timedelta(days=30))
past_iso_date = datetime.isoformat(datetime.now(utc) - timedelta(days=30))

course_with_coupons_and_quotes = {
    "name":
    "Awesome Course",
    "link":
    "https://udemy.com/awesomecourse",
    "description":
    "Whatta course!",
    "imageName":
    "course_image.png",
    "coupons": [
        {
            "code": "NOT_EXPIRED",
            "utcExpirationISO": future_iso_date,
Example #59
 def log(self, desc, data=None, data_type=None, debug=False):
     with self.lock:
         try:
             desc = u'(%s) %s' % (str(self.log_id), desc)
             if self.mute or data_type in self.smute or (debug and
                                                         not self.debug):
                 return
             if debug and data_type is None:
                 data_type = 'debug'
             ts = unicode(
                 datetime.isoformat(datetime.now()) +
                 ' ' if self.formal else '').encode('utf-8')
             if self.color and data_type != 'table':
                 if data_type == 'comment':
                     print(ts + BLUE + desc.strip().encode('utf-8') +
                           NORMAL)
                 elif data_type == 'title':
                     print(ts + BOLD + BLUE + desc.strip().encode('utf-8') +
                           NORMAL)
                 elif data_type == 'error':
                     print(ts + BOLD + RED + desc.encode('utf-8') + NORMAL,
                           file=sys.stderr)
                 elif data_type == 'warn':
                     print(ts + BOLD + YELLOW + desc.encode('utf-8') +
                           NORMAL,
                           file=sys.stderr)
                 elif data_type == 'ok':
                     print(ts + BOLD + GREEN + desc.encode('utf-8') +
                           NORMAL)
                 elif desc is not None:
                     print(ts + BOLD + desc.encode('utf-8') + NORMAL)
             elif data_type != 'table':
                 # Some trickery to ensure this works for special chars in a demstart environment.
                 prefix = ts + unicode(data_type or u'info').upper()
                 postfix = ' ' + desc
                 line = unicode(prefix + postfix)  # renamed from 'all' to avoid shadowing the builtin
                 print(line.encode('utf-8'))
             if data is not None:
                 if _have_pygment and self.color and data_type == 'xml':
                     try:
                         data = etree.tostring(data)
                     except AttributeError:
                         pass
                     except TypeError:
                         pass
                     try:
                         data = data.decode('utf8')
                     except AttributeError:
                         data = unicode(data, 'utf8')
                     except UnicodeDecodeError:
                         pass
                     except UnicodeEncodeError:
                         pass
                     print(highlight(data, self.lexer, self.formatter))
                 elif _have_pygment and self.color and data_type == 'json':
                     print(highlight(data, self.json_lexer, self.formatter))
                 elif data_type in ('pp', 'error'):
                     if isinstance(data, CaseInsensitiveDict) or isinstance(
                             data, dict):
                         if len(data) == 0:
                             print('{empty dict}',
                                   file=(sys.stderr if data_type == 'error'
                                         else sys.stdout))
                         else:
                             longest = max([len(k) for k in data.keys()])
                             for k, v in data.iteritems():
                                 print(((u'  %%-%is   %%s' % longest) %
                                        (k, v)).encode('utf-8'),
                                       file=(sys.stderr if data_type
                                             == 'error' else sys.stdout))
                             print('',
                                   file=(sys.stderr if data_type == 'error'
                                         else sys.stdout))
                     elif isinstance(data, unicode):
                         print(data.encode('utf-8'),
                               file=(sys.stderr if data_type == 'error' else
                                     sys.stdout))
                     else:
                         pp.pprint(data)
                 elif data_type == 'table':
                     lengths = [len(x) for x in desc]
                     max_length = self.max_column_width
                     for row in data:
                         lengths = [
                             min(max_length,
                                 max(lengths[n], len(unicode(x or '-'))))
                             for n, x in enumerate(row)
                         ]
                     fs = '  '.join([('%%-%is' % l) for l in lengths])
                     if self.color:
                         print(BLUE + (fs % desc).encode('utf-8') + DEFAULT)
                     else:
                         print((fs % desc).encode('utf-8'))
                     for row in data:
                         print(fs %
                               tuple([(unicode(ensure_unicode(col))
                                       or u'-')[:max_length].encode('utf-8')
                                      for col in row]))
                 else:
                     print(data.encode('utf-8'))
             sys.stdout.flush()
         except Exception as e:
             print("LOGGING ERROR: " + str(e), file=sys.stderr)
Example No. 60
0
def write_optout_code(sender, instance, created, raw, **kwargs):
    if created and not raw:  # created means a new DB entry was created; raw is set when fixtures are being loaded
        instance.optout_code = md5(instance.name+instance.addr+datetime.isoformat(datetime.now())).hexdigest()
        instance.save()
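
Since md5() requires bytes in Python 3, a hedged sketch of the same token
derivation ported forward would encode the payload first. The function name
and fields below are assumptions mirroring the handler above, not the
project's actual code.

from datetime import datetime
from hashlib import md5

def make_optout_code(name, addr):
    # Opaque opt-out token from name, address, and the current timestamp.
    payload = (name + addr + datetime.isoformat(datetime.now())).encode('utf-8')
    return md5(payload).hexdigest()

print(make_optout_code('Ada', '1 Main St'))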