Example #1
File: websocket.py  Project: nova9/rawbot
async def start(resume):
    '''Connection to the API'''
    async with websockets.connect('wss://gateway.discord.gg?encoding=json&v=6',
                                  loop=asyncio.get_event_loop()) as websocket:
        start_time = time.time()
        if not resume:
            hello = parse_data(await websocket.recv())
            payload = {
                "op": 2,
                "d": {
                    "token": TOKEN,
                    'properties': {
                        '$os': os.name,
                        '$browser': 'dapi-bot',
                        '$device': 'dapi-bot'
                    },
                    'compress': True,
                    'large_threshold': 250,
                    "presence": {
                        "game": {
                            "name": " with Discord API",
                            "type": 0
                        },
                        "status": "dnd",
                        "since": int(time.time()),
                        "afk": False
                    }
                }
            }
            await websocket.send(json.dumps(payload))
            ready = parse_data(await websocket.recv())
            SESSION_DATA[0] = ready['d']['session_id']
            for i in ready['d']['guilds']:
                x = parse_data(await websocket.recv())
            print('Bot connected')
        else:
            payload = {
                "op": 6,
                "d": {
                    "token": TOKEN,
                    "session_id": SESSION_DATA[0],
                    "seq": SESSION_DATA[1]
                }
            }
            await websocket.send(json.dumps(payload))
            hello = parse_data(await websocket.recv())
            await websocket.recv()
        while True:
            # The gateway reports heartbeat_interval in milliseconds
            if (int(time.time() - start_time) + 1) % (
                    hello['d']['heartbeat_interval'] // 1000) == 0:
                payload = {"op": 1, "d": SESSION_DATA[1]}
                await websocket.send(json.dumps(payload))
                await websocket.recv()
            try:
                event = parse_data(await websocket.recv())
                SESSION_DATA[1] = event['s']
                print(event['t'])  # Logs every event the gateway sends
                await commands.parse_event(event)
            except websockets.exceptions.ConnectionClosed:
                break
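The heartbeat logic above is fragile: the Discord gateway reports heartbeat_interval in milliseconds and expects an op-1 payload carrying the last received sequence number at that cadence. A minimal sketch of a dedicated heartbeat task, assuming the same websocket connection and SESSION_DATA list used in this example (not the project's actual code):

import asyncio
import json

async def heartbeat(websocket, interval_ms, session_data):
    # Send an op-1 heartbeat every interval_ms milliseconds,
    # echoing the last sequence number we have seen.
    while True:
        await asyncio.sleep(interval_ms / 1000)
        await websocket.send(json.dumps({"op": 1, "d": session_data[1]}))

Such a task would typically be started with asyncio.ensure_future(heartbeat(websocket, hello['d']['heartbeat_interval'], SESSION_DATA)) right after the hello payload is received.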
Example #2
def get_data(query, variables):
    has_next = True
    cursor = None
    entities = []

    spinner = Spinner('Fetching Github Data')
    while has_next:
        spinner.next()
        variables['cursor'] = cursor

        rate_limit = get_rate_limit(client)
        handle_rate_limit(rate_limit)
        results = json.loads(client.execute(query, variables))

        if results['data'] and results['data']['search']['edges']:
            nodes = [ edge['node'] for edge in results['data']['search']['edges']]
            for node in nodes:
                entities.append(parse_data(node))
            has_next = results['data']['search']['pageInfo']['hasNextPage']
            cursor = results['data']['search']['pageInfo']['endCursor']
        else:
            logger.warn(f'No data found: {results}')
            has_next = False


    spinner.finish()
    print('\n')
    return entities
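A hypothetical invocation of get_data, assuming client and get_rate_limit are configured elsewhere and that the query exposes search.edges and search.pageInfo as the loop above expects (the query string and variable names below are illustrative, not taken from the source):

search_query = '''
query ($query_string: String!, $cursor: String) {
  search(query: $query_string, type: REPOSITORY, first: 100, after: $cursor) {
    edges { node { ... on Repository { nameWithOwner stargazerCount } } }
    pageInfo { hasNextPage endCursor }
  }
}
'''
entities = get_data(search_query, {'query_string': 'topic:machine-learning stars:>500'})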
Example #3
File: tree.py  Project: clarle/phylo-mcmc
 def __init__(self, **kwargs):
     tau = kwargs.get("tau", None)
     filename = kwargs.get("filename", "infile")
     seqs, num_seqs, len_seqs = parse_data(filename)
     leaf_nodes = self.generate_leaves(seqs, tau)
     self.root = self.generate_tree(leaf_nodes, tau)
     self.curr = self.root
Example #4
def main():

    ut = UTILS()
    parameters=ut.read_parameter_file(file = "../para.dat")
    parameters['L']=2

    nrange = np.arange(4,200,4,dtype=int)
    nrange = [nrange[18]]
    dt = 0.02
    parameters['dt'] = dt
    mean_fid = []
    std_fid = []

    for n in nrange:
        #model = ut.quick_setup(argv=['T=%.3f'%T,'n_step=%i'%n_step],file='../para.dat')
        parameters['T'] = n*dt
        parameters['n_step']= n
        file_name = ut.make_file_name(parameters,root="../data/")
        res = parse_data(file_name,v=2) # results stored here ...
        mean_fid.append(np.mean(res['F']))
        std_fid.append(np.std(res['F']))
        prot = res['protocol']
    
    n_step = len(res['protocol'][0])
    plotting.protocol(np.arange(0,n_step)*dt,np.mean(res['protocol'],axis=0),title='$T=%.3f$'%(dt*n_step))
    #plt.scatter(nrange*dt,std_fid)
    #plt.plot(nrange*dt,std_fid)
    plt.show()
Example #5
def uploaded_data(content=None):
    if content is not None:
        df = utils.parse_data(content)
        freq_plot, time_plot = utils.generate_plot(df)
        return freq_plot, time_plot
    else:
        raise dash.exceptions.PreventUpdate
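In a Dash app this function is normally registered as a callback mapping an upload component to two graph figures. A hypothetical wiring (the component ids 'upload-data', 'freq-plot' and 'time-plot' are assumptions, not taken from the source):

import dash
from dash.dependencies import Input, Output

app = dash.Dash(__name__)

# Register the uploaded_data function defined above as a callback.
app.callback(
    [Output('freq-plot', 'figure'), Output('time-plot', 'figure')],
    [Input('upload-data', 'contents')],
)(uploaded_data)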
Example #6
def test_parse_data_invalid(invalid_data_frame):
    invalid_df, cols = invalid_data_frame
    test_row = list(zip(invalid_df[cols].to_numpy()))[0][0]
    # pytest.raises ensures the test fails if no KeyError is raised.
    with pytest.raises(KeyError):
        parse_data(test_row)
Example #7
    def get_data(self):
        with tf.name_scope('data'):
            # 1. Construct the datasets for train/validate/test.
            train_data, eval_data, test_data = utils.get_mnist_dataset(
                self.batch_size)  # without val_data
            iterator = tf.data.Iterator.from_structure(
                train_data.output_types, train_data.output_shapes)
            #print(train_data.output_shapes)    #((None, 28, 28), (None, 10))
            self.train_init = iterator.make_initializer(
                train_data)  # initializer for train_data
            self.eval_init = iterator.make_initializer(eval_data)
            self.test_init = iterator.make_initializer(test_data)

            # 2. Construct the dataset used only for dumping the intermediate layers
            test = utils.parse_data('data/mnist', 't10k',
                                    False)  #refer to parse_data function
            test_set = tf.data.Dataset.from_tensor_slices(
                test)  #construct the dataset
            iterator1 = test_set.make_one_shot_iterator()

            if not self.dump:
                img, self.label = iterator.get_next(
                )  # fetch a batch of samples; the last batch may be smaller than batch_size
                #print(img.shape, self.label.shape)             #(?, 28, 28)  (?, 10)
            else:
                img, self.label = iterator1.get_next()
                #print(img.shape, self.label.shape)             #(28, 28)  (10, )
                length = self.label.shape[0]  # equals label.size
                self.label = tf.reshape(
                    self.label,
                    shape=[1, length])  # force the shape to (1, 10)

            self.img = tf.reshape(
                img, shape=[-1, 28, 28,
                            1])  # -1 infers the batch dimension; images have a single channel
Example #8
File: data.py  Project: sergioframi/TFG
def category(data, data_file):
    res = []
    cat = parse_data(data_file)
    for k in cat:
        tmp = [k for e in cat[k] if e in data]
        res.extend(tmp)
    return res
Example #9
    def __init__(self, model_type, context_size):

        samples = []
        targets = []
        self.vocab = Vocab()
        self.vocab.add_context(raw_text)

        pairs = parse_data(raw_text, model_type, context_size)

        if model_type == 'NEG':
            self.unigram_dist = build_unigram(raw_text)
            self.positive_pairs = set(
                tuple(self.vocab.get_tensor(sample)) for sample, _ in pairs)

        for sample, target in pairs:
            if model_type == 'NEG':
                samples.append(self.vocab.get_tensor(sample))
                targets.append(target)
            elif model_type == 'SKIPGRAM':
                samples.append(self.vocab.get_tensor([sample]))
                targets.append(self.vocab.get_tensor([target]))
            else:
                samples.append(self.vocab.get_tensor(sample))
                targets.append(self.vocab.get_tensor([target]))

        self.samples = torch.tensor(samples).long()
        self.targets = torch.tensor(targets).long()
Example #10
def make_histogram(parameters, version=2):
    utils = UTILS()
    file_name = utils.make_file_name(parameters, root="../data/")
    data = parse_data(file_name, v=version)
    fidelities = data['F']
    plt.hist(fidelities, bins=200)
    plt.savefig('histogram/hist.pdf')
Example #11
    def on_any_event(event):
        if event.is_directory:
            return None

        elif event.event_type == 'created':
            # Take any action here when a file is first created.
            print("Received created event - %s." % event.src_path)
            if event.src_path.endswith('.json'):
                try:
                    df = pd.read_json(event.src_path)
                    if len(df) == 0:
                        print("Data point is empty. Removing data point...")
                        os.remove(event.src_path)
                except:
                    print("Corrupted json file. Removing data point...")
                    os.remove(event.src_path)
                else:
                    print("Updating data...")
                    Param.json_data = parse_data(Param.WATCH,
                                                 Param.OUTPUT,
                                                 Param.json_data,
                                                 event.src_path,
                                                 init=False)
                    print("Reloading Supervisor...")
                    os.system('sudo service supervisor reload')

            else:
                print("Ignoring")
Example #12
def menu1():
    """ Menu that will import the ship data """
    # Declare we are using the global to add the JSON data to it
    global ship_dict
    ship_dict = utils.parse_data()
    choice = input("Enter 0 to exit or main for main menu: ")
    exec_menu(choice)
    return
Example #13
def create_normalized_index():
    db = connect_db()
    cursor = db.cursor()
    data = get_all_data_from_db(cursor)
    redis = connect_word_to_norm_word()

    words = parse_data(data)
    redis_normal_index(redis, words, normalize_bag_of_words(words))
Example #14
def initialize():
    print("Initializing...")
    print('reading all JSON files from: ' + Param.WATCH)
    Param.json_data = parse_data(Param.WATCH,
                                 Param.OUTPUT,
                                 Param.json_data, [],
                                 init=True)
    print("Done.")
Example #15
def load_ships(list_widget):
    global ship_dict
    ship_dict = utils.parse_data()
    list_widget.clear()
    # Add all ships to the list
    for item in ship_dict:
        list_widget.addItem(item['Name'])
    alert = QMessageBox()
    alert.setText('Ships have been loaded from data.json')
    alert.exec_()
Example #16
 def test_parse_data_legacy(self, relation_get):
     _rel_data = {'testkey': repr({'test': 1})}
     relation_get.side_effect = lambda key, relid, unit: _rel_data.get(key)
     self.assertEqual(
         utils.parse_data('hacluster:1', 'neutron-api/0', 'testkey'),
         {'test': 1})
     relation_get.assert_has_calls([
         mock.call('json_testkey', 'neutron-api/0', 'hacluster:1'),
         mock.call('testkey', 'neutron-api/0', 'hacluster:1'),
     ])
Example #17
def test():
    ux, uy = parse_data()
    usps_data = load_usps(ux, uy, validation_size=5000, test_size=0)
    mnist_data = load_mnist(one_hot=True, validation_size=5000)
    x_original = mnist_data.dataset.validation._images
    wdgrl = WDGRL(input_dim=784,
                  gp_param=10,
                  training_steps=2000,
                  D_train_steps=20)
    wdgrl.fit(data_src=usps_data, data_tar=mnist_data, draw_plot=True)
    x_new = wdgrl.transform(x_original)
Example #18
def migrate_maas_dns():
    """
    Migrates the MAAS DNS HA configuration to write local IP address
    information to files.
    """
    if not needs_maas_dns_migration():
        log("MAAS DNS migration is not necessary.", INFO)
        return

    for relid in relation_ids('ha'):
        for unit in related_units(relid):
            resources = parse_data(relid, unit, 'resources')
            resource_params = parse_data(relid, unit, 'resource_params')

            if True in [ra.startswith('ocf:maas')
                        for ra in resources.values()]:
                for resource in resource_params.keys():
                    if resource.endswith("_hostname"):
                        res_ipaddr = get_ip_addr_from_resource_params(
                            resource_params[resource])
                        log("Migrating MAAS DNS resource %s" % resource, INFO)
                        write_maas_dns_address(resource, res_ipaddr)
Example #19
File: data.py  Project: sergioframi/TFG
 def loc(ip, api_conf):
     if ip not in mem:
         with lock:
             # Double-check after acquiring the lock: another thread may already have memoized this IP
             if ip not in mem:
                 params = parse_data(api_conf)
                 loc = requests.get("http://api.ipstack.com/{}".format(ip),
                                    params=params).json()
                 mem[ip] = {
                     "country": loc["country_name"],
                     "is_eu": loc["location"]["is_eu"]
                 }
     return mem[ip]
Example #21
 def test_parse_data_legacy(self, relation_get):
     _rel_data = {
         'testkey': repr({'test': 1})
     }
     relation_get.side_effect = lambda key, relid, unit: _rel_data.get(key)
     self.assertEqual(utils.parse_data('hacluster:1',
                                       'neutron-api/0',
                                       'testkey'),
                      {'test': 1})
     relation_get.assert_has_calls([
         mock.call('json_testkey', 'neutron-api/0', 'hacluster:1'),
         mock.call('testkey', 'neutron-api/0', 'hacluster:1'),
     ])
Example #22
 def test_parse_data_json(self, relation_get):
     _rel_data = {
         'json_testkey': json.dumps({'test': 1}),
         'testkey': repr({'test': 1})
     }
     relation_get.side_effect = lambda key, relid, unit: _rel_data.get(key)
     self.assertEqual(
         utils.parse_data('hacluster:1', 'neutron-api/0', 'testkey'),
         {'test': 1})
     # NOTE(jamespage): as json is the preferred format, the call for
     #                  testkey should not occur.
     relation_get.assert_has_calls([
         mock.call('json_testkey', 'neutron-api/0', 'hacluster:1'),
     ])
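Taken together, the two tests above imply a parse_data that prefers a JSON-encoded relation key and falls back to the legacy repr()-encoded value. A rough reconstruction of that behaviour, not the charm's actual implementation:

import ast
import json

from charmhelpers.core.hookenv import relation_get

def parse_data(relid, unit, key):
    # Prefer the JSON-encoded key; fall back to the legacy repr() format.
    value = relation_get('json_%s' % key, unit, relid)
    if value:
        return json.loads(value)
    value = relation_get(key, unit, relid)
    return ast.literal_eval(value) if value else {}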
Example #23
async def callback(callback):

    try:
        await dp.throttle('callback', rate=0.8)

    except Throttled:
        await bot.answer_callback_query(callback.id, text='Флуд')

    else:
        await bot.answer_callback_query(callback.id)
        data = parse_data(callback.data)

        if data['action'] == 'open':
            getattr(view, data['action'])(callback.message)
Example #24
def ahorros_para_lograr_meta():
    data = parse_data(request.get_json())

    ini_dep = data.get('ini_dep')
    fin_bal = data.get('fin_bal')
    freq = data.get('freq')
    num_of_years = data.get('num_of_years')
    rate = data.get('rate')
    dep_when = data.get('dep_when')
    time_scale, rows_per_page = data.get('time_scale')

    periods, periods_m, periods_a = get_periods(freq, num_of_years)

    fv = fut_val(rate / (100 * freq), freq * num_of_years, ini_dep)

    reg_dep = -1 * payment(rate / (100 * freq), freq * num_of_years, 0,
                           fin_bal + fv, dep_when)

    deposits, reg_deps, extra_deps = get_deposits(ini_dep, reg_dep, 0, 0, 0,
                                                  periods)

    interests, agg_interests, agg_deposits, balances = get_balances(
        periods, deposits, ini_dep, rate, freq, dep_when)

    return jsonify({
        'reg_dep': reg_dep,
        'time_scale': time_scale,
        'total_dep': sum(deposits),
        'total_int': sum(interests),
        'fin_bal': balances[-1],
        'periods': periods,
        'agg_deposits': agg_deposits,
        'agg_interests': agg_interests,
        'balances': balances,
        'table': get_table(periods, deposits, interests, balances),
        'table_m': get_table_m(periods_m, deposits, interests, balances, freq),
        'table_a': get_table_a(periods_a, deposits, interests, balances, freq)
    }), 200, HEADERS
Example #25
 def test_parse_data_json(self, relation_get):
     _rel_data = {
         'json_testkey': json.dumps({'test': 1}),
         'testkey': repr({'test': 1})
     }
     relation_get.side_effect = lambda key, relid, unit: _rel_data.get(key)
     self.assertEqual(utils.parse_data('hacluster:1',
                                       'neutron-api/0',
                                       'testkey'),
                      {'test': 1})
     # NOTE(jamespage): as json is the preferred format, the call for
     #                  testkey should not occur.
     relation_get.assert_has_calls([
         mock.call('json_testkey', 'neutron-api/0', 'hacluster:1'),
     ])
Example #26
def main():

    ut = UTILS()
    parameters = ut.read_parameter_file(file="../para.dat")
    parameters['L'] = 1

    n_step = 400
    nrange = np.arange(10, 800, 10, dtype=int)
    Trange = np.arange(0.05, 4.01, 0.05)
    dt = 0.005
    parameters['dt'] = dt
    mean_fid = []
    std_fid = []
    n_fid = []
    ed1 = []
    ed2 = []

    for n in nrange:
        #for n in nrange:
        #model = ut.quick_setup(argv=['T=%.3f'%T,'n_step=%i'%n_step],file='../para.dat')
        parameters['T'] = n * dt
        #parameters['T'] = T
        parameters['n_step'] = n
        #parameters['n_step']= n_step
        #parameters['dt'] = parameters['T']/parameters['n_step']
        file_name = ut.make_file_name(parameters, root="../data/")
        res = parse_data(file_name)  # results stored here ...
        print(n, '\t', len(res['F']))
        mean_fid.append(np.max(res['F']))
        n_fid.append(np.mean(res['n_fid']))
        std_fid.append(np.std(res['F']))
        tmp = 8 * (res['protocol'] - 0.5)
        ed1.append(Ed_Ad_OP(tmp))
        #ed2.append(Ed_Ad_OP_2(res['protocol'],min_h=0, max_h=1))

    plt.plot(nrange * dt, n_fid, label='ed1')
    #plt.plot(nrange*dt, ed2,label='ed2')
    plt.legend(loc='best')
    plt.show()
    exit()
    n_step = len(res['protocol'][0])
    plotting.protocol(Trange,
                      np.mean(res['protocol'], axis=0),
                      title='$T=%.3f$' % (dt * n_step))
    #plotting.protocol(np.arange(0,n_step)*dt,np.mean(res['protocol'],axis=0),title='$T=%.3f$'%(dt*n_step))
    #plt.scatter(nrange*dt,std_fid)
    #plt.plot(nrange*dt,std_fid)
    plt.show()
Example #27
def calculadora_de_ahorros():
    data = parse_data(request.get_json())

    ini_dep = data.get('ini_dep')
    reg_dep = data.get('reg_dep')
    freq = data.get('freq')
    num_of_years = data.get('num_of_years')
    rate = data.get('rate')
    extra_dep = data.get('extra_dep')
    extra_dep_start = data.get('extra_dep_start')
    extra_dep_f = data.get('extra_dep_f')
    dep_when = data.get('dep_when')
    time_scale, rows_per_page = data.get('time_scale')

    periods, periods_m, periods_a = get_periods(freq, num_of_years)

    deposits, reg_deps, extra_deps = get_deposits(ini_dep, reg_dep, extra_dep,
                                                  extra_dep_start, extra_dep_f,
                                                  periods)

    interests, agg_interests, agg_deposits, balances = get_balances(
        periods, deposits, ini_dep, rate, freq, dep_when)

    return jsonify({
        'time_scale': time_scale,
        'total_dep': sum(deposits),
        'total_int': sum(interests),
        'fin_bal': balances[-1],
        'periods': periods,
        'agg_deposits': agg_deposits,
        'agg_interests': agg_interests,
        'balances': balances,
        'table': get_table(periods, deposits, interests, balances),
        'table_m': get_table_m(periods_m, deposits, interests, balances, freq),
        'table_a': get_table_a(periods_a, deposits, interests, balances, freq)
    }), 200, HEADERS
Example #28
async def main():
    reader, writer = await asyncio.open_connection(reverse_ip, reverse_port)
    writer.write(b"con")
    await writer.drain()
    print("Connection to {} {} is established, ready to reverse proxy.".format(
        reverse_ip, reverse_port))

    future_list = []
    data = b''
    while True:
        dat = await reader.read(2048)
        data += dat
        command_list, data = parse_data(data)
        for command in command_list:
            coro = make_pipe(command, reverse_ip, reverse_port)
            asyncio.ensure_future(coro)
            print("New future created")
Example #29
def upload_courses(file_path: str, engine: MySQLEngine):
    """
    Uploads all the courses to the MySQL database.

    :param self:
    :param file_path: the directory where to search the CSV files
    :param engine: a MySQLEngine where the data needs to be uploaded
    """
    df = parse_data(file_path)
    df = df[['courseId', 'creditHours']]

    # default value of Interest- will need to be hardcoded.
    df['Interest'] = ['["None"]'] * len(df)

    gpa_df = extract_gpa_data(file_path)
    df = merge_gpa_data(df, gpa_df)

    engine.insert_df(df, 'courses')
Example #30
def main():
    utils = UTILS()
    model = utils.quick_setup(argv=['T=3.3'], file="../para.dat")
    parameters = utils.read_parameter_file(file="../para.dat")
    Trange = np.arange(0.1, 4.01, 0.1)  # maybe trick is to bin stuff up --> ?!
    interval = [0.85, 1.0]
    #for t in Trange:
    parameters['T'] = 3.2
    parameters['n_step'] = 100
    parameters['n_quench'] = 1000
    parameters['dt'] = parameters['T'] / parameters['n_step']

    file_name = utils.make_file_name(parameters, root="../data/")
    data = parse_data(file_name, v=3)

    fid_series = data['fid_series']
    protocols = data['protocol']
    fid = data['F']
    protocols = protocols[(fid < interval[1]) &
                          (fid > interval[0])]  # protocols within an interval
Example #31
    def __init__(self, training, args):
        self.training = training
        self.classification = True
        self.augment = False
        self.data_dir = 'D:/all/'
        self.df = pd.read_csv(os.path.join(self.data_dir,'stage_1_train_labels.csv'))
        self.truth = parse_data(self.df)

        if self.training:
            self.data_index, _, _,_ = train_test_split(np.arange(len(self.truth)), np.arange(len(self.truth)), test_size=0.25, random_state=42)


        else:
            _, self.data_index, _,_ = train_test_split(np.arange(len(self.truth)), np.arange(len(self.truth)), test_size=0.25, random_state=42)
            self.length = len(self.data_index)

        self.length = len(self.data_index)
        with open('val.txt','w') as f:
            for i in range(self.length):
                f.write(self.df['patientId'][self.data_index[i]] +'\n')
Example #32
def calcAll():
    """
    Calculate the new macd-coefficients for all MACD-objects

    :return: List of serialized MACD-objects
    """
    global macd_objects
    global data

    for macd in macd_objects:
        try:
            if macd.pair not in data:
                data[macd.pair] = fetch(macd.pair)  # get data
                data[macd.pair] = parse_data(
                    data[macd.pair])  # in each pair is stored sdf-data itself

        except Exception as err:
            pass
        sdf = macd.calculate_coefficient(data[macd.pair][macd.time_period])
    data = dict()  # empty data
Example #33
def train(epoch, max_epoch, model, dataloader, optimizer, class_num, print_freq=20):
    acc_mean = 0.0
    loss_mean = 0.0
    cnt = 0
    for batch_idx, data in enumerate(dataloader):
        imgs, labels = parse_data(data, class_num)
        batch_size = imgs.shape[0]
        # print(imgs.shape)

        # print(imgs[0])
        outputs = model(imgs)

        # if batch_idx == 0:
        #     print(outputs[0])

        loss_deriv = L2Loss_deriv(outputs, labels)
        optimizer.backward(loss_deriv)
        optimizer.update(imgs)

        loss = L2Loss(outputs, labels)
        loss_mean = (loss * batch_size + cnt * loss_mean) / (cnt + batch_size)
        acc = accuracy(outputs, labels)
        acc_mean = (acc * batch_size + cnt * acc_mean) / (cnt + batch_size)
        cnt += batch_size
        if (batch_idx+1) % print_freq == 0:
            print('Epoch: [{0}/{1}][{2}/{3}]  '
                  'Loss {loss:.4f} ({loss_mean:.4f})  '
                  'Acc {acc:.2f} ({acc_mean:.2f})  '
                  'Lr {lr:.6f}  '.format(
                epoch + 1, max_epoch, batch_idx + 1, len(dataloader),
                loss=loss,
                loss_mean=loss_mean,
                acc=acc,
                acc_mean=acc_mean,
                lr=optimizer.lr
                )
            )
    return loss_mean, acc_mean
Example #34
def getInitiateId(yb, type):
    """
        :param yb: 实例化yb对象
        :param type:  返回数据类型0:最新表单 1:前一天表单
    """
    # 通过访问任务列表获取TaskId
    taskList = yb.taskList()
    TaskId = taskList.get('data')['list'][type]["LinkTo"].split('=')[1]
    CompletedList = yb.getTaskDetail(TaskId)
    # 我的任务
    # 对< InitiateId >进行异常捕获,寻找到最新的值
    try:
        InitiateId = CompletedList.get('data')['InitiateId']
    except KeyError:
        if TaskId == 'view&id' or InitiateId is not None:
            for i in range(10):
                TaskId = taskList.get('data')['list'][i]["LinkTo"].split(
                    '=')[1]
                if TaskId != 'view&id':
                    CompletedList = yb.getTaskDetail(TaskId)
                    InitiateId = CompletedList.get('data')['InitiateId']
                    # WeiSanJin.getInitiateId:
                    print("WeiSanJin.异常处理      :<- InitiateId -> " +
                          str(InitiateId))
                    break

    shareUrl = 'https://app.uyiban.com/workflow/client/#/share?initiateId=' + InitiateId

    # Fetch the share link again in case the old one has expired
    ShareUrl = yb.getShareUrl(InitiateId)

    # Write the latest share link to config.txt
    with open('config.txt', 'w') as f:
        # print('\n' + "Share link: " + shareUrl)
        f.write(shareUrl)

    # Return the latest form data
    return parse_data(shareUrl, yb)
Example #35
File: gui.py  Project: Sudoka/SWAG
def main():
  reply = eg.boolbox(msg='Hello, do you have a pre-computed classifier?', choices=('Yes', 'No'))
  if reply == 1:
    filename = eg.fileopenbox(msg='Please specify cached classifier file ending in .clfr')
    model = utils.load_classifiers(filename)
    reply = eg.boolbox(msg='Now that we have specified our classifier, we must now specify data to be classified. Are you ready to proceed?', choices=('Yes', 'No'))
    if reply == 0:
      sys.exit()
    filename = eg.fileopenbox(msg='Please specify data to be classified.')
    D = utils.parse_data(filename)
    outfilename = ''.join(filename.split('.')[:-1]) + '.lbls'
    print 'Classifying data...'
    with open(outfilename, 'w') as f:
      for d in D:
        f.write(str(model.classify_vector(d)))
        f.write('\n')
    print 'Done!'

  else:
    filename = eg.fileopenbox(msg='Please specify training data for your classifier')
    D = utils.parse_data(filename)
    reply = eg.boolbox(msg='Would you like to engage in manual or automatic construction of your classifier?', choices=('Manual', 'Automatic'))
    #manual selection
    if reply == 1:
      algs = eg.multchoicebox(msg='Please choose at least one algorithm:', choices=('k-Nearest Neighbors', 'Support Vector Machine', 'Naive Bayes'))
      alg_params = {alg : 'auto' for alg in algs}
      #storage for set of classifiers
      C = []
      for alg in algs:
        reply = eg.boolbox(msg='Would you like to engage in manual or automatic parameter tuning for your ' + alg + ' algorithm?', choices=('Manual', 'Automatic'))
        if reply == 1:
          if alg[0] == 'k':
            params = eg.multenterbox(msg='Please select the following parameters for your ' + alg + ' algorithm:', fields=('k'), values=['1'])
            print 'Building ' + alg + ' classifier...'
            C.append(cl.kNN(D, k=int(params[0])))
            print 'Done!\n'
          if alg[0] == 'S':
            reply = eg.boolbox(msg='What type of kernel would you like to use for your Support Vector Machine?', choices=('Radial Basis Function', 'Linear'))
            if reply == 1:
              params = eg.multenterbox(msg='Please select the following parameters for your RBF Support Vector Machine:', fields=('margin', 'gamma'), values=['1.0', '1.0'])
              print 'Building ' + alg + ' classifier...'
              C.append(cl.SVM(D, kernel_type='RBF', margin=float(params[0]), gamma=float(params[1])))
              print 'Done!\n'
            else:
              params = eg.enterbox(msg='Please select the margin parameter for your Linear Support Vector Machine:', default='1.0')
              print 'Building ' + alg + ' classifier...'
              C.append(cl.SVM(D, kernel_type='Linear', margin=float(params[0])))
              print 'Done!\n'
          if alg[0] == 'N':
            params = eg.enterbox(msg='Please select the binning threshold parameter for your Naive Bayes algorithm:', default='.01')
            print 'Building ' + alg + ' classifier...'
            C.append(cl.NB(D))
            print 'Done!\n'
            
        else:
          if alg[0] == 'k':
            print 'Building ' + alg + ' classifier...'
            C.append(cl.kNN(D))
            print 'Done!\n'
          if alg[0] == 'S':
            print 'Building ' + alg + ' classifier...'
            C.append(cl.SVM(D))
            print 'Done!\n'
          if alg[0] == 'N':
            print 'Building ' + alg + ' classifier...'
            C.append(cl.NB(D))
            print 'Done!\n'

      model = mcl.AdaBoost(C)

    #automatic selection
    else:
      print 'Constructing classifiers...'
      model = mcl.AdaBoost([cl.kNN(D), cl.SVM(D), cl.NB(D)])
      print 'Done!\n'

    reply = eg.boolbox(msg='Now that we have specified our classifier, we must now specify data to be classified. Are you ready to proceed?', choices=('Yes', 'No'))
    if reply == 0:
      sys.exit()
    filename = eg.fileopenbox(msg='Please specify data to be classified.')
    D = utils.parse_data(filename)
    outfilename = ''.join(filename.split('.')[:-1])
    print 'Classifying data...'
    with open(outfilename + '.lbls', 'w') as f:
      for d in D:
        f.write(str(model.classify_vector(d)))
        f.write('\n')
    print 'Done!'
    #cache our classifier
    utils.store_classifiers(outfilename + '.clfr', model)
    #give user some information on classifiers used
    open(outfilename + '.info', 'w').write(model.get_info())
""" Run data-level preprocessing so we don't have to do it every epoch """
import utils
from collections import Counter
import csv 
import codecs

if __name__ == '__main__':
	for filename in [utils.TRAIN_FILENAME, utils.TRAIN_PLUS_DEV_FILENAME, utils.DEV_FILENAME, utils.TEST_FILENAME]:
		utils.parse_data(filename)
Example #37
def ha_relation_changed():
    # Check that we are related to a principal and that
    # it has already provided the required corosync configuration
    if not get_corosync_conf():
        log('Unable to configure corosync right now, deferring configuration',
            level=INFO)
        return

    if relation_ids('hanode'):
        log('Ready to form cluster - informing peers', level=DEBUG)
        relation_set(relation_id=relation_ids('hanode')[0], ready=True)
    else:
        log('Ready to form cluster, but not related to peers just yet',
            level=INFO)
        return

    # Check that there's enough nodes in order to perform the
    # configuration of the HA cluster
    if len(get_cluster_nodes()) < int(config('cluster_count')):
        log('Not enough nodes in cluster, deferring configuration',
            level=INFO)
        return

    relids = relation_ids('ha')
    if len(relids) == 1:  # Should only ever be one of these
        # Obtain relation information
        relid = relids[0]
        units = related_units(relid)
        if len(units) < 1:
            log('No principle unit found, deferring configuration',
                level=INFO)
            return

        unit = units[0]
        log('Parsing cluster configuration using rid: %s, unit: %s' %
            (relid, unit), level=DEBUG)
        resources = parse_data(relid, unit, 'resources')
        delete_resources = parse_data(relid, unit, 'delete_resources')
        resource_params = parse_data(relid, unit, 'resource_params')
        groups = parse_data(relid, unit, 'groups')
        ms = parse_data(relid, unit, 'ms')
        orders = parse_data(relid, unit, 'orders')
        colocations = parse_data(relid, unit, 'colocations')
        clones = parse_data(relid, unit, 'clones')
        locations = parse_data(relid, unit, 'locations')
        init_services = parse_data(relid, unit, 'init_services')
    else:
        log('Related to %s ha services' % (len(relids)), level=DEBUG)
        return

    if True in [ra.startswith('ocf:openstack')
                for ra in resources.values()]:
        apt_install('openstack-resource-agents')
    if True in [ra.startswith('ocf:ceph')
                for ra in resources.values()]:
        apt_install('ceph-resource-agents')

    if True in [ra.startswith('ocf:maas')
                for ra in resources.values()]:
        try:
            validate_dns_ha()
        except MAASConfigIncomplete as ex:
            log(ex.args[0], level=ERROR)
            status_set('blocked', ex.args[0])
            # if an exception is raised the hook will end up in error state
            # which will obfuscate the workload status and message.
            return

        log('Setting up access to MAAS API', level=INFO)
        setup_maas_api()
        # Update resource_params for DNS resources to include MAAS URL and
        # credentials
        for resource in resource_params.keys():
            if resource.endswith("_hostname"):
                res_ipaddr = get_ip_addr_from_resource_params(
                    resource_params[resource])
                resource_params[resource] += (
                    ' maas_url="{}" maas_credentials="{}"'
                    ''.format(config('maas_url'),
                              config('maas_credentials')))
                write_maas_dns_address(resource, res_ipaddr)

    # NOTE: this should be removed in 15.04 cycle as corosync
    # configuration should be set directly on subordinate
    configure_corosync()
    try_pcmk_wait()
    configure_cluster_global()
    configure_monitor_host()
    configure_stonith()

    # Only configure the cluster resources
    # from the oldest peer unit.
    if is_leader():
        log('Deleting Resources: %s' % (delete_resources), level=DEBUG)
        for res_name in delete_resources:
            if pcmk.crm_opt_exists(res_name):
                if ocf_file_exists(res_name, resources):
                    log('Stopping and deleting resource %s' % res_name,
                        level=DEBUG)
                    if pcmk.crm_res_running(res_name):
                        pcmk.commit('crm -w -F resource stop %s' % res_name)
                else:
                    log('Cleaning up and deleting resource %s' % res_name,
                        level=DEBUG)
                    pcmk.commit('crm resource cleanup %s' % res_name)
                # Daemon process may still be running after the upgrade.
                kill_legacy_ocf_daemon_process(res_name)
                pcmk.commit('crm -w -F configure delete %s' % res_name)

        log('Configuring Resources: %s' % (resources), level=DEBUG)
        for res_name, res_type in resources.items():
            # disable the service we are going to put in HA
            if res_type.split(':')[0] == "lsb":
                disable_lsb_services(res_type.split(':')[1])
                if service_running(res_type.split(':')[1]):
                    service_stop(res_type.split(':')[1])
            elif (len(init_services) != 0 and
                  res_name in init_services and
                  init_services[res_name]):
                disable_upstart_services(init_services[res_name])
                if service_running(init_services[res_name]):
                    service_stop(init_services[res_name])
            # Put the services in HA, if not already done so
            # if not pcmk.is_resource_present(res_name):
            if not pcmk.crm_opt_exists(res_name):
                if res_name not in resource_params:
                    cmd = 'crm -w -F configure primitive %s %s' % (res_name,
                                                                   res_type)
                else:
                    cmd = ('crm -w -F configure primitive %s %s %s' %
                           (res_name, res_type, resource_params[res_name]))

                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)
                if config('monitor_host'):
                    cmd = ('crm -F configure location Ping-%s %s rule '
                           '-inf: pingd lte 0' % (res_name, res_name))
                    pcmk.commit(cmd)

            else:
                # the resource already exists so it will be updated.
                code = pcmk.crm_update_resource(res_name, res_type,
                                                resource_params.get(res_name))
                if code != 0:
                    msg = "Cannot update pcmkr resource: {}".format(res_name)
                    status_set('blocked', msg)
                    raise Exception(msg)

        log('Configuring Groups: %s' % (groups), level=DEBUG)
        for grp_name, grp_params in groups.items():
            if not pcmk.crm_opt_exists(grp_name):
                cmd = ('crm -w -F configure group %s %s' %
                       (grp_name, grp_params))
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Master/Slave (ms): %s' % (ms), level=DEBUG)
        for ms_name, ms_params in ms.items():
            if not pcmk.crm_opt_exists(ms_name):
                cmd = 'crm -w -F configure ms %s %s' % (ms_name, ms_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Orders: %s' % (orders), level=DEBUG)
        for ord_name, ord_params in orders.items():
            if not pcmk.crm_opt_exists(ord_name):
                cmd = 'crm -w -F configure order %s %s' % (ord_name,
                                                           ord_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Clones: %s' % clones, level=DEBUG)
        for cln_name, cln_params in clones.items():
            if not pcmk.crm_opt_exists(cln_name):
                cmd = 'crm -w -F configure clone %s %s' % (cln_name,
                                                           cln_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        # Ordering is important here, colocation and location constraints
        # reference resources. All resources referenced by the constraints
        # need to exist otherwise constraint creation will fail.

        log('Configuring Colocations: %s' % colocations, level=DEBUG)
        for col_name, col_params in colocations.items():
            if not pcmk.crm_opt_exists(col_name):
                cmd = 'crm -w -F configure colocation %s %s' % (col_name,
                                                                col_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Locations: %s' % locations, level=DEBUG)
        for loc_name, loc_params in locations.items():
            if not pcmk.crm_opt_exists(loc_name):
                cmd = 'crm -w -F configure location %s %s' % (loc_name,
                                                              loc_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        for res_name, res_type in resources.items():
            if len(init_services) != 0 and res_name in init_services:
                # Checks that the resources are running and started.
                # Ensure that clones are excluded as the resource is
                # not directly controllable (dealt with below)
                # Ensure that groups are cleaned up as a whole rather
                # than as individual resources.
                if (res_name not in clones.values() and
                    res_name not in groups.values() and
                        not pcmk.crm_res_running(res_name)):
                    # Just in case, cleanup the resources to ensure they get
                    # started in case they failed for some unrelated reason.
                    cmd = 'crm resource cleanup %s' % res_name
                    pcmk.commit(cmd)

        for cl_name in clones:
            # Always cleanup clones
            cmd = 'crm resource cleanup %s' % cl_name
            pcmk.commit(cmd)

        for grp_name in groups:
            # Always cleanup groups
            cmd = 'crm resource cleanup %s' % grp_name
            pcmk.commit(cmd)

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id, clustered="yes")
Example #38
#
# Push data live to API using Python on a RaspberryPi
#

import serial
from utils import parse_data, send_data

ser = serial.Serial('/dev/ttyACM0')
while True:
    message = ser.readline()
    data = parse_data(message)
    if data:
        send_data(data)
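The parse_data and send_data helpers imported from utils are not shown; a rough sketch of what they might look like, with the field layout and API endpoint as assumptions:

import requests

def parse_data(message):
    # Assumed serial format: b"temperature,humidity\n" per reading.
    try:
        temperature, humidity = message.decode().strip().split(',')
        return {'temperature': float(temperature), 'humidity': float(humidity)}
    except (UnicodeDecodeError, ValueError):
        return None  # skip malformed lines

def send_data(data):
    # Hypothetical endpoint; replace with the real API URL.
    requests.post('https://example.com/api/readings', json=data, timeout=5)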
Example #39
	else: 
		print "no trimming"

	print "Algorithm type:",
	if algorithm_type == ClusterAlg.KMEANS:
		print "kmeans"
	elif algorithm_type == ClusterAlg.HIERARCHICAL:
		print "hierarchical"
	elif algorithm_type == ClusterAlg.SELFORGMAPS:
		print "selforgmaps"
		print "Number of points in SOM grid (xgrid * ygrid):", xgrid * ygrid

	# Parse input data.

	try:
		data = utils.parse_data(input_file_path)
	except IOError, err:
		sys.exit(err)

	# Import requested events.
 	# TODO(karol): this may have to be moved from here to the plotting
	# file.

	try:
		events = { }
		if import_catastrophic_events:
			catastrophic_events = eventutils.import_events(
					"../data/wydarzenia-katastrofy-polska.txt")
			events[catastrophic_events[0]] = catastrophic_events[1]
		if import_economical_events:
			economical_events = eventutils.import_events(
Example #40
			event_paths['ekonomiczne'] = '../data/wydarzenia-ekonomiczne-polska.txt'
			event_paths['inne'] = '../data/wydarzenia-inne-polska.txt'
			event_file_path = event_paths[arg[7:]]
		if arg.startswith('xrange:'):
			if arg[7:]:
				set_xrange = 'set xrange [%s]' % arg[7:]
	
	# If error message is not empty print it and exit.
	
	if err_msg != "":
		print err_msg
		sys.exit(1)

	# Parse stock data.

	data = utils.parse_data("../data/notowania.txt")

	found_cluster = False
	plot_tmpfiles = []
	if all_clusters:
		clusters_count = detect_clusters_count(clusters_file_path)
		for i in range(clusters_count):
			_, plot_data = plot_single_cluster(clusters_file_path, i, average_plot)
			plot_tmpfiles.append(plot_data)
	else:
		found_cluster, plot_data_tmpfile_path = plot_single_cluster(clusters_file_path, cluster_number, average_plot)
	
	if found_cluster:

		# Generate file with commands for gnuplot.
Example #41
	def setUp(self):
		self.data1 = utils.parse_data(self.test_file_path1)
		self.data2 = utils.parse_data(self.test_file_path2)