Example No. 1
def update_repos():
    current_time = datetime.now()
    new_time = current_time.strftime("%c")

    # Drop tables
    engine = create_engine(ProductionConfig.SQLALCHEMY_DATABASE_URI)
    GitRepo.__table__.drop(engine)
    GitUser.__table__.drop(engine)

    db.create_all()

    # Update users
    users = LocalUser.query.all()
    timezone = {'Time-Zone': 'PST8PDT'}
    for user in users:
        username = user.localuser
        git_data = requests.get(
            f'https://api.github.com/users/{username}/events?per_page=100',
            headers=timezone)  # GitHub reads Time-Zone from a request header, not a query parameter
        content = git_data.content
        parsed_json = json.loads(content)
        parse_data(parsed_json)

    # Timestamp update (reuse the engine created above)
    Timestamp.__table__.drop(engine)
    db.create_all()

    new_timestamp = Timestamp(time=new_time)

    db.session.add(new_timestamp)
    db.session.commit()
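The drop/recreate step above fails if a table does not exist yet (for example on a first run). A minimal sketch of the same reset, assuming the same models and engine, with the drops guarded by SQLAlchemy's checkfirst flag:

# Sketch only: identical reset, but missing tables no longer raise on drop
engine = create_engine(ProductionConfig.SQLALCHEMY_DATABASE_URI)
GitRepo.__table__.drop(engine, checkfirst=True)
GitUser.__table__.drop(engine, checkfirst=True)
db.create_all()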
Example No. 2
def q23_request(led_matrix):
    raw_response = data.make_request('MTABC_Q23', 503862)
    valid_until, json_buses = data.parse_data(raw_response)
    buses = data.simplify_parsed_data(json_buses)

    led_matrix.device.contrast(32)
    led_matrix.print_msg(str(buses[0]))
Example No. 3
	def on_open_saves_dat_clicked(self):
		if 'saves.dat' in self.open_tabs:
			return self.activate_tab('saves.dat')

		try:
			path = miasutil.find_miasmata_save()
		except Exception as e:
			path = QtGui.QFileDialog.getOpenFileName(self,
					"Select Miasmata Save Location...",
					None, 'Miasmata save files (*.dat)')[0]
			if not path:
				return

		saves = data.parse_data(open(path, 'rb'))

		saves.name = data.null_str('saves.dat')
		view = miasmod_data.MiasmataDataView(saves, sort=True, save_path=path, name='saves.dat')
		self.add_tab(view, 'saves.dat', 'saves.dat')
Example No. 4
def main():
	import sys, rs5archive, rs5file, data, imag
	from PIL import Image  # Image.eval and image.save below are PIL calls
	print 'Opening saves.dat...'
	saves = open('saves.dat', 'rb')
	print 'Processing saves.dat...'
	saves = data.parse_data(saves)
	exposure_map = saves[sys.argv[1]]['player']['MAP']['exposure_map'].raw
	print 'Opening main.rs5...'
	archive = rs5archive.Rs5ArchiveDecoder(open('main.rs5', 'rb'))

	print 'Extracting Map_FilledIn...'
	filledin = rs5file.Rs5ChunkedFileDecoder(archive['TEX\\Map_FilledIn'].decompress())
	print 'Decoding Map_FilledIn...'
	filledin = imag.open_rs5file_imag(filledin, (1024, 1024), 'RGB')

	print 'Extracting Map_OverlayInfo...'
	overlayinfo = rs5file.Rs5ChunkedFileDecoder(archive['TEX\\Map_OverlayInfo'].decompress())
	print 'Decoding Map_OverlayInfo...'
	overlayinfo = imag.open_rs5file_imag(overlayinfo, (1024, 1024), 'RGB')

	print 'Extracting player_map_achievements...' # XXX: Also in environment.rs5
	shoreline = rs5file.Rs5ChunkedFileDecoder(archive['player_map_achievements'].decompress())

	print 'Generating map...'
	(image, outline_mask, filledin_mask, overlayinfo_mask, extra) = gen_map(exposure_map, filledin, overlayinfo)

	print 'Darkening...'
	image = Image.eval(image, lambda x: x/4)

	print 'Overlaying shoreline...'
	overlay_smap(image, shoreline, outline_mask, filledin_mask)

	print 'Saving exposure_map.png...'
	image.save('exposure_map.png')

	print 'Compressing exposure_map...'
	new_exposure_map = make_exposure_map(outline_mask, filledin_mask, overlayinfo_mask, extra)

	print 'Comparing...'
	assert exposure_map == new_exposure_map

	print 'Success'
Example No. 5
def prepare_data(path, test=0.1, max_len=None, split=False):
    # Note: max_len must be an integer here; it is reused below as maxlen and as max_len + 1
    poses, parents, rels, _ = parse_data(path, max_len)

    poses = add_padding_feature(
        pad_sequences(poses, maxlen=max_len, padding='post'))
    new_parents = []
    for sentence in parents:
        sentence = pad_sequences(sentence, maxlen=max_len + 1, padding='post')
        new_parents.append(sentence)

    parents = add_padding_feature(
        pad_sequences(new_parents, maxlen=max_len, padding='post'))
    rels = add_padding_feature(
        pad_sequences(rels, maxlen=max_len, padding='post'))

    if split:
        poses_train, poses_test, parents_train, parents_test, rels_train, rels_test = train_test_split(
            poses, parents, rels, test_size=test, shuffle=False)

        return poses_train, poses_test, parents_train, parents_test, rels_train, rels_test, max_len
    else:
        return poses, parents, rels, max_len
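A usage sketch for the function above; the file path and max_len value are illustrative placeholders, and parse_data is assumed to accept them as in the snippet:

# Hypothetical call: 'train.conll' and max_len=50 are placeholder values
poses_tr, poses_te, parents_tr, parents_te, rels_tr, rels_te, max_len = prepare_data(
    'train.conll', test=0.1, max_len=50, split=True)
print(poses_tr.shape, rels_te.shape)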
Example No. 6
def parse_contents(contents, filename, date):
    """
    Parses user-submitted dataset.

    Args:
        contents (str): A contents string generated from the user-uploaded file.
        filename (str): The filename of the user-uploaded file.
        date (str): The date of the user-uploaded file.
    
    Returns:
        (df, data, categories, columns), as produced by parse_data on the decoded CSV.
    """
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)

    if 'csv' in filename:
        # Assume that the user uploaded a CSV file
        df, data, categories, columns = parse_data(
            io.StringIO(decoded.decode("ISO-8859-1")))
    else:
        raise ValueError("Only CSV format supported.")

    return df, data, categories, columns
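For context, Dash's dcc.Upload component supplies contents as a data-URI-style string ("data:<mime>;base64,<payload>"), which is why the function splits on the comma before base64-decoding. A small sketch of calling the parser by hand with such a string (the sample CSV and filename are illustrative):

import base64

sample_csv = "label,text\nham,see you tomorrow\nspam,win a free prize now"
contents = "data:text/csv;base64," + base64.b64encode(sample_csv.encode()).decode()
df, data, categories, columns = parse_contents(contents, "sample.csv", "2021-01-01")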
Example No. 7
def main():

    cprint('Starting LDS Tools Automator...', 'green', attrs=['bold'])

    cprint('\tFetching data from Google Sheets...', 'cyan')

    try:
        values = data.get_data()
    except ValueError:
        cprint('Error: no data in spreadsheet!', 'red', attrs=['bold'])
        return  # without data there is nothing to process

    members = data.parse_data(values)
    decoded = json.loads(members)

    cprint('\tPulling records...', 'cyan')
    username = raw_input(colored('\tLDS Username: ', 'yellow'))
    password = getpass.getpass(colored('\tLDS Password: ', 'yellow'))

    session = requests.session()
    credentials = tools.login(session, username, password)

    mrns = []
    for member in decoded:

        try:
            result = records.pull(member, credentials, session)
            mrns.append(result)
            cprint('\tsuccess!', 'green')

        except AssertionError:
            result = {'error': 'non-200 response', 'row': member['id']}
            cprint('\terror', 'red', attrs=['bold'])
        except (ValueError, KeyError, TypeError):
            result = {'error': 'unable to parse JSON', 'row': member['id']}
            cprint('\terror', 'red', attrs=['bold'])

        row = data.build_pulled_row(result)
        data.update_row(row, result['row'])

    cprint('\tFetching former bishops...', 'cyan')

    count = 0
    for member in mrns:

        try:
            result = bishops.fetch(member, credentials, session)
            cprint('\tsuccess!', 'green')
            count += 1

        except AssertionError:
            result = {'error': 'non-200 response', 'row': member['row']}
            cprint('\terror', 'red', attrs=['bold'])

        except (ValueError, KeyError, TypeError):
            result = {'error': 'unable to parse JSON', 'row': member['row']}
            cprint('\terror', 'red', attrs=['bold'])

        row = data.build_bishop_row(result)
        data.update_row(row, result['row'])

    msg = 'moved %d records' % len(mrns)
    cprint(msg, 'green', attrs=['bold'])
    msg = 'found %d bishops' % count
    cprint(msg, 'green', attrs=['bold'])
Example No. 8
		ave_acc = float(total_acc) / cnt
		print "ave_acc:", ave_acc

	return float(total_acc) / cnt
	
if __name__ == '__main__':
	args = get_arg()
	print 'gpu:', args.gpu
	root_path = "../../data/tasks_1-20_v1-2/en"
	# Unknown words (keys) are assigned a fresh id (value): defaultdict calls len(vocab)
	# for each missing key, so ids are handed out as 0, 1, 2, ... (see the demo after this example)
	vocab = collections.defaultdict(lambda: len(vocab))
	for data_id in range(1, 21):
		# data_id = 1
		# glob.glob: returns the matching paths as a list
		fpath = glob.glob('%s/qa%d_*train.txt' % (root_path, data_id))[0]
		train_data = data.parse_data(fpath, vocab)
		fpath = glob.glob('%s/qa%d_*test.txt' % (root_path, data_id))[0]
		test_data = data.parse_data(fpath, vocab)
		print('Training data: %d' % len(train_data))		# number of stories (the data split at sentence id 1)
		train_data = convert_data(train_data, args.gpu)
		test_data = convert_data(test_data, args.gpu)
		model = MemNN(len(vocab), 20, 50)	# (n_units: word-embedding dimension (=20), n_vocab: vocabulary size, max_mem=50)
		if args.gpu >= 0:
			model.to_gpu()
			xp = cupy
		else:
			xp = np

		# Setup an optimizer	
		optimizer = optimizers.Adam(alpha=0.01, beta1=0.9, beta2=0.999, eps=1e-6)
		optimizer.setup(model)
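The vocabulary trick used above is plain Python: a defaultdict whose factory is len(vocab) hands out consecutive ids the first time each word is looked up. A tiny standalone demo:

import collections

vocab = collections.defaultdict(lambda: len(vocab))
ids = [vocab[w] for w in ['mary', 'went', 'to', 'the', 'kitchen', 'to']]
print(ids)          # [0, 1, 2, 3, 4, 2]
print(dict(vocab))  # {'mary': 0, 'went': 1, 'to': 2, 'the': 3, 'kitchen': 4}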
Example No. 9
from bm_alg import boyer_moore_match
from routes import save_file, download, file_download_link
from dash.dependencies import Input, Output, State

try:
    from predict_input import predict, MODEL
except ModuleNotFoundError:
    print('[WARNING] tensorflow not found.')

import json
import dash
import plotly.express as px
import pandas as pd
import dash_html_components as html

# Initialize data for initial layout
df, DATA, CATEGORIES, COLUMNS = parse_data(DATASET)


def parse_json(json_data, category):
    """
    Parses <json_data> from the intermediate value.

    Args:
        json_data (str): A string of data to process in JSON format.
        category (str): The category ('all' / 'spam' / 'ham') to extract data from.

    Returns:
        (status_code, data), where status_code = 0 if there is no error, 1 otherwise.
    """
    if json_data:
        loaded_data = json.loads(json_data)
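The function is cut off above. A sketch of how a caller might use the documented (status_code, data) contract; the variable names and the table rendering are assumptions for illustration, not the project's actual callback:

# Hypothetical caller built only on the docstring's contract above;
# intermediate_json stands in for the Dash intermediate value (a JSON string)
status, rows = parse_json(intermediate_json, 'spam')
if status == 0:
    table = html.Table([html.Tr([html.Td(cell) for cell in row]) for row in rows])
else:
    table = html.Div('Nothing to display yet.')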
Example No. 10
    return float(total_acc) / cnt

if __name__ == '__main__':
    args = get_arg()
    print 'gpu:', args.gpu
    root_path = "../../data/tasks_1-20_v1-2/en"
    # Unknown words (keys) are assigned a fresh id (value): defaultdict calls len(vocab) for each missing key
    vocab = collections.defaultdict(lambda: len(vocab))
    for data_id in range(1, 21):
        print "-------------------------------------"
        print "task_id:", data_id
        data_id = 8  # debug override: forces task 8 regardless of the loop variable
        # data_id = 1
        # glob.glob: returns the matching paths as a list
        fpath = glob.glob('%s/qa%d_*train.txt' % (root_path, data_id))[0]
        train_data = data.parse_data(fpath, vocab)
        fpath = glob.glob('%s/qa%d_*test.txt' % (root_path, data_id))[0]
        test_data = data.parse_data(fpath, vocab)
        # print('Training data: %d' % len(train_data))  # number of stories (the data split at sentence id 1)
        train_data = convert_data(train_data, args.gpu)
        test_data = convert_data(test_data, args.gpu)
        model = MemNN(
            len(vocab), 20,
            50)  # (n_units: word-embedding dimension (=20), n_vocab: vocabulary size, max_mem=50)
        if args.gpu >= 0:
            model.to_gpu()
            xp = cupy
        else:
            xp = np
        print vocab
        # Setup an optimizer
Example No. 11
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--exp_name',
                        required=True,
                        help='experiment result saving path')

    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=512)
    parser.add_argument('--model', type=str, default='GRU_CNN')
    parser.add_argument('--cuda', action='store_true')

    args = parser.parse_args()

    # data parser

    raw_data, raw_label = load_data(cfg.data_root, cfg.data_ext)

    seqs = parse_data(raw_data)
    labels = parse_label(raw_label)

    onehot_labels = get_onehot(labels)

    padded_seqs, embed_size = pad_feature(seqs,
                                          padding_ext=cfg.padding_ext,
                                          global_padding=cfg.padding_all)

    padded_seqs = np.array(padded_seqs)
    labels = np.array(labels)

    data_size = len(padded_seqs)
    label_size = len(set(labels))

    data_idxs = list(range(data_size))

    training_split = 0.7

    train_idx = random.sample(data_idxs, int(data_size * training_split))

    train_dict = defaultdict(int)
    for idx in train_idx:
        train_dict[idx] = 1

    val_idx = [idx for idx in data_idxs if not train_dict[idx] == 1]

    train_onehot = np.array([onehot_labels[idx] for idx in train_idx])
    val_onehot = np.array([onehot_labels[idx] for idx in val_idx])

    train_dataset = Dataset(padded_seqs, labels, train_idx, mode='train')
    val_dataset = Dataset(padded_seqs, labels, val_idx, mode='val')

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=cfg.num_workers)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=cfg.num_workers)

    criterion = nn.CrossEntropyLoss()

    if args.cuda:
        criterion = criterion.cuda()

    if args.model == 'GRU_CNN':
        model = GRU_CNN(vocab_size=data_size,
                        emb_size=embed_size,
                        label_size=label_size,
                        hidden_1=cfg.hidden_1,
                        hidden_2=cfg.hidden_2,
                        pt_embed=None,
                        dropout=cfg.dropout)

    elif args.model == 'Text_CNN':
        model = Text_CNN(vocab_size=data_size,
                         emb_size=embed_size,
                         output_channels=cfg.kernel_dims,
                         kernel_heights=cfg.kernel_heights,
                         kernel_width=embed_size,
                         label_size=label_size,
                         pt_embed=None,
                         dropout=cfg.dropout)

    else:
        raise NotImplementedError

    if args.cuda:
        model = model.cuda()

    optimizer = torch.optim.Adam(model.get_trainable_parameters(),
                                 lr=cfg.learning_rate,
                                 betas=cfg.betas,
                                 eps=cfg.eps)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           factor=0.2,
                                                           patience=2,
                                                           verbose=1)

    best_acc = 0
    for epoch in range(args.epoch):
        train_loss, train_acc, train_auc = train_epoch(model, args.batch_size,
                                                       train_loader, optimizer,
                                                       criterion, train_onehot)
        val_loss, val_acc, val_auc = eval_epoch(model, args.batch_size,
                                                val_loader, criterion,
                                                val_onehot)
        scheduler.step(val_loss)

        print(
            '[training][Epoch:{epoch}] loss:{loss:.3f} accu:{accu:.3f} auc:{auc:.3f}'
            .format(epoch=epoch,
                    loss=train_loss,
                    accu=train_acc,
                    auc=train_auc))
        print(
            '[validation][Epoch:{epoch}] loss:{loss:.3f} accu:{accu:.3f} auc:{auc:.3f}'
            .format(epoch=epoch, loss=val_loss, accu=val_acc, auc=val_auc))

        ckpt = dict(model=model.state_dict(),
                    settings=args,
                    epoch=epoch,
                    optim=optimizer,
                    accu=val_acc)

        if val_acc > best_acc:
            best_acc = val_acc
            best_model = model
            save_dir = osp.join(cfg.model_dir, args.exp_name)
            if not osp.exists(save_dir):
                os.makedirs(save_dir)

            torch.save(ckpt, osp.join(save_dir,
                                      'epoch_' + str(epoch) + '.ckpt'))

    print('Best acc:{:.3f}'.format(best_acc))
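A usage sketch for the command-line interface defined above; the script name and experiment name are assumptions, and only --exp_name is required:

# Equivalent invocation from Python; 'train.py' and 'gru_cnn_baseline' are placeholders
import sys
sys.argv = ['train.py', '--exp_name', 'gru_cnn_baseline',
            '--model', 'GRU_CNN', '--epoch', '10', '--batch_size', '512']
main()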
Example No. 12
               data.get_single_feature(b, 4, data_set, sample_sizes),
               color='y')
    ax.legend(['top', 'jg', 'mid', 'adc', 'sup'])
    ax.grid(True)
    plt.xlabel('Param ' + str(a))
    plt.ylabel('Param ' + str(b))
    plt.show()


# Scatterplot of Assists vs Gold Share
plot_param(1, 3, inputs, sample_sizes)
plt.clf()
# Scatterplot of Deaths vs Gold Share
plot_param(0, 3, inputs, sample_sizes)

# URL strings of testing data for each position
sup_2020_playoffs_url = "https://lol.gamepedia.com/Special:RunQuery/TournamentStatistics?TS%5Bpreload%5D=TournamentByChampionRole&TS%5Brole%5D=Support&TS%5Btournament%5D=LCS/2020%20Season/Spring%20Playoffs&pfRunQueryFormName=TournamentStatistics"
top_2020_playoffs_url = "https://lol.gamepedia.com/Special:RunQuery/TournamentStatistics?TS%5Bpreload%5D=TournamentByChampionRole&TS%5Brole%5D=Top&TS%5Btournament%5D=NA%20Academy%20League/2020%20Season/Spring%20Playoffs&pfRunQueryFormName=TournamentStatistics"
mid_2020_playoffs_url = "https://lol.gamepedia.com/Special:RunQuery/TournamentStatistics?TS%5Bpreload%5D=TournamentByChampionRole&TS%5Brole%5D=Mid&TS%5Btournament%5D=NA%20Academy%20League/2020%20Season/Spring%20Playoffs&pfRunQueryFormName=TournamentStatistics"
adc_2020_playoffs_url = "https://lol.gamepedia.com/Special:RunQuery/TournamentStatistics?TS%5Bpreload%5D=TournamentByChampionRole&TS%5Brole%5D=AD%20Carry&TS%5Btournament%5D=NA%20Academy%20League/2020%20Season/Spring%20Playoffs&pfRunQueryFormName=TournamentStatistics"
jg_2020_playoffs_url = "https://lol.gamepedia.com/Special:RunQuery/TournamentStatistics?TS%5Bpreload%5D=TournamentByChampionRole&TS%5Brole%5D=Jungle&TS%5Btournament%5D=NA%20Academy%20League/2020%20Season/Spring%20Playoffs&pfRunQueryFormName=TournamentStatistics"

# Testing samples of each position
inputs_top = data.parse_data(top_2020_playoffs_url, params)
inputs_jg = data.parse_data(jg_2020_playoffs_url, params)
inputs_mid = data.parse_data(mid_2020_playoffs_url, params)
inputs_adc = data.parse_data(adc_2020_playoffs_url, params)
inputs_sup = data.parse_data(sup_2020_playoffs_url, params)

# Prints accuracy of model
print_acc(inputs_top, inputs_jg, inputs_mid, inputs_adc, inputs_sup)