Example #1
def evaluate(data_iter, model, vecs, TEXT, LABELS, criterion, emb_dim):
    model.eval()
    corrects, avg_loss, t5_corrects, rr = 0, 0, 0, 0
    for batch_count,batch in enumerate(data_iter):
        #print('avg_loss:', avg_loss)
        inp, target = batch.text, batch.label
        inp.data.t_()#, target.data.sub_(1)  # batch first, index align
        inp3d = torch.cuda.FloatTensor(inp.size(0),inp.size(1),emb_dim)
        for i in range(inp.size(0)):
          for j in range(inp.size(1)):
            inp3d[i,j,:] = vecs[TEXT.vocab.itos[inp[i,j].data[0]]]
        #if args.cuda:
        #    feature, target = feature.cuda(), target.cuda()

        outp = batch.label.t()
        outp3d = torch.cuda.FloatTensor(outp.size(0),outp.size(1),emb_dim)
        for i in range(outp.size(0)):
          for j in range(outp.size(1)):
            outp3d[i,j,:] = vecs[LABELS.vocab.itos[outp[i,j].data[0]]]

        preds, attns = model(Variable(inp3d),Variable(outp3d,requires_grad=False))
        loss, grad, numcorrect = memoryEfficientLoss(preds, batch.label, model.generate, criterion, eval=True)

        avg_loss += loss

    size = len(data_iter.dataset)
    avg_loss = avg_loss/size
    model.train()
    print("EVAL: ",avg_loss)
    generate(val_iter, model, vecs, TEXT, LABELS, 300)

    return avg_loss#, accuracy, corrects, size, t5_acc, t5_corrects, mrr);
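
A note on Example #1: the nested i/j loops build the embedded input one token at a time, which is a common bottleneck. A minimal sketch of the same lookup done in one vectorized call, assuming (as the loops above imply) that `vecs` maps a token string to a tensor of size `emb_dim`; `build_embedding` is a hypothetical helper, not part of the original:

import torch
import torch.nn as nn

def build_embedding(vecs, itos, emb_dim):
    # Copy the pretrained vectors into one (vocab, emb_dim) weight matrix.
    weight = torch.zeros(len(itos), emb_dim)
    for idx, tok in enumerate(itos):
        weight[idx] = vecs[tok]
    # freeze=True keeps the vectors fixed, matching the read-only use above.
    return nn.Embedding.from_pretrained(weight, freeze=True)

# emb = build_embedding(vecs, TEXT.vocab.itos, emb_dim)
# emb(inp) then maps a (batch, seq) LongTensor of token ids straight to a
# (batch, seq, emb_dim) FloatTensor, replacing the nested loops.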
Example #2
def generate_config(iface, webfolder):
  print("+---------------------------------------------------")
  print("| Scanning for new computers...")
  scann_port(iface)
  print("| Done!")
  print("+---------------------------------------------------\n\n\n")
  print("Generating config files for all scanned PCs...")
  generate(iface, webfolder)
  print("Done!")
Example #3
def main():

	#JSON object for settings
	jsondata = open(os.path.dirname(os.path.abspath(__file__)) + "/conf.json")
	settings = json.load(jsondata)

	#datasource for database access
	datasource = db.db(os.path.dirname(os.path.abspath(__file__)) + "/ssvd.db")
	datasource.open()
	
	print("Starting crawl..")
	
	#looping through folders specified in settings
	for folder in settings["folders"]:
	
		if(folder == ""):
			tablename = "Videos"
		else:
			tablename = folder.lower()
	
		print("Crawling " + folder)
	
		#truncate database before new entries
		datasource.truncate(tablename)
		
		fullpath = str(settings["fullpath"]) + str(folder)
		for dirname, subList, fileList in os.walk(fullpath):
		
			#Sort lists alphabetically and case insensitive
			subList.sort(key = lambda s: s.lower())
			fileList.sort(key = lambda s: s.lower())
			
			for filename in fileList:
			
				#Ignores all AppleDouble directories
				if ".AppleDouble" not in dirname:
				
					#Checks if filename contains any wanted extensions
					if any(str(ext) in filename for ext in settings["filetypes"]):

						#Insert into database
						datasource.insert(tablename, dirname.replace(fullpath,''), filename, int(os.stat(os.path.join(dirname, filename)).st_mtime))
						
	print("Done crawling")
						
	#Commit and close database when done
	datasource.close()
	
	#Generate webpages
	generate()
Example #4
def test():
    X_train, y_train, X_test, y_test = getData()
    # Calculate SSIM
    result = generate(X_test[0:10000], (10000, 28, 28, 1))
    predict(sess, sess_model, X_test[0:100])
    predict(sess, sess_model, result[0:100])
    print()
Example #5
def progress():
    print(request.args)
    # Extract information
    seed = request.args['seed']
    length = int(request.args['length'])
    temperature = float(request.args['temperature'])
    topk = int(request.args['topk'])
    topp = float(request.args['topp'])
    fast_pattern = 'fast_pattern' in request.args
    nsamples = int(request.args['nsamples'])
    modelname = request.args['modelname']

    def generate():
        # Generate a random sequence
        for x in text_generator(seed=seed,
                                length=length,
                                temperature=temperature,
                                topk=topk,
                                topp=topp,
                                fast_pattern=fast_pattern,
                                nsamples=nsamples,
                                modelname=modelname):
            yield f"data:{x}\n\n"

    return Response(generate(), mimetype='text/event-stream')
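
For reference, a minimal sketch of consuming the event stream that Example #5 returns, using `requests`; the host, port, and parameter values here are assumptions for illustration:

import requests

# Hypothetical server address; the query parameters mirror what progress() reads.
params = {
    "seed": "Once upon a time", "length": 64, "temperature": 1.0,
    "topk": 8, "topp": 0.9, "nsamples": 1, "modelname": "default",
}
with requests.get("http://localhost:5000/progress", params=params, stream=True) as r:
    for line in r.iter_lines(decode_unicode=True):
        # Server-sent events arrive as "data:<payload>" lines separated by blanks.
        if line and line.startswith("data:"):
            print(line[len("data:"):])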
Example #6
def main():
    arguments = docopt(__doc__)

    if arguments['lorem']:
        print(generate(arguments['<iterations>'], arguments['--text-size']))
    elif arguments['data']:
        get_data(arguments['<data-url>'], arguments['<data-location>'])
Example #7
def main(prefix, temperature, length, topk, topp, nsamples=5):
    raw_text = prefix
    context_tokens = tokenizer.convert_tokens_to_ids(
        tokenizer.tokenize(raw_text))
    generated = 0
    for _ in range(nsamples):
        out = generate(n_ctx=n_ctx,
                       model=model,
                       context=context_tokens,
                       length=length,
                       is_fast_pattern=True,
                       tokenizer=tokenizer,
                       temperature=temperature,
                       top_k=topk,
                       top_p=topp,
                       repitition_penalty=repetition_penalty,
                       device=device)
        generated += 1
        text = tokenizer.convert_ids_to_tokens(out)
        for i, item in enumerate(text[:-1]):  # ensure a space between adjacent English words
            if is_word(item) and is_word(text[i + 1]):
                text[i] = item + ' '
        for i, item in enumerate(text):
            if item == '[MASK]':
                text[i] = ''
            elif item == '[CLS]':
                text[i] = '\n\n'
            elif item == '[SEP]':
                text[i] = '\n'
        info = "=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40 + "\n"
        print(info)
        text = ''.join(text).replace('##', '').strip()
        print(text)
Example #8
def train():
    device = torch.device("cuda")
    vocab, train_data = pre_process()
    vocab_size = len(vocab)
    train_data = torch.from_numpy(train_data)
    data_loader = DataLoader(train_data,
                             batch_size=Config.batch_size,
                             shuffle=True,
                             drop_last=True)
    model = PoetryModel(vocab_size, Config.embedding_dim, Config.hidden_dim)
    model.to(device)
    optimizer = opt.Adam(model.parameters(), lr=Config.lr)
    criterion = nn.CrossEntropyLoss()

    for epoch in range(Config.epoch):
        count = 0
        for i, data in enumerate(data_loader):
            data = data.long().transpose(1, 0).contiguous()
            data = data.to(device)
            optimizer.zero_grad()
            input_, target = data[:-1, :], data[1:, :]
            output, _ = model(input_)
            loss = criterion(output, target.view(-1))
            loss.backward()
            optimizer.step()
            count += 1
            perplexity = torch.mean(torch.exp(loss)).item()
            print("epoch:%d step:%d loss:%0.8f perplexity:%0.8f" %
                  (epoch + 1, count, loss.item(), perplexity))

    gen_poetry = ''.join(generate(model, '日', vocab))
    print(gen_poetry)
    torch.save(model.state_dict(), 'model/poem.pth')
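
The `input_, target = data[:-1, :], data[1:, :]` split in train() above is the standard next-token shift for language-model training; a tiny self-contained illustration of the alignment:

import torch

# One toy sequence of 5 token ids, shaped (seq_len, batch) as in train().
data = torch.tensor([[3], [7], [2], [9], [1]])
input_, target = data[:-1, :], data[1:, :]
# The model sees input_[t] and is trained to predict target[t], i.e. the
# next token: (3 -> 7), (7 -> 2), (2 -> 9), (9 -> 1).
print(input_.squeeze(1).tolist())   # [3, 7, 2, 9]
print(target.squeeze(1).tolist())   # [7, 2, 9, 1]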
Example #9
def userTest():
    print("正在初始化......")
    datas = np.load("data/tang.npz",allow_pickle=True)
    data = datas['data']
    ix2word = datas['ix2word'].item()
    word2ix = datas['word2ix'].item()
    model = PoetryModel(len(ix2word), Config.embedding_dim, Config.hidden_dim)
    model.load_state_dict(t.load(Config.model_path, 'cpu'))
    if Config.use_gpu:
        model.to(t.device('cuda'))
    print("初始化完成!\n")
    while True:
        print("欢迎使用李港唐诗生成器,\n"
              "输入1 进入首句生成模式\n"
              "输入2 进入藏头诗生成模式\n")
        mode = int(input())
        if mode == 1:
            print("请输入您想要的诗歌首句,可以是五言或七言")
            start_words = str(input())
            gen_poetry = ''.join(generate(model, start_words, ix2word, word2ix))
            print("生成的诗句如下:%s\n" % (gen_poetry))
        elif mode == 2:
            print("请输入您想要的诗歌藏头部分,不超过16个字,最好是偶数")
            start_words = str(input())
            gen_poetry = ''.join(gen_acrostic(model, start_words, ix2word, word2ix))
            print("生成的诗句如下:%s\n" % (gen_poetry))
Example #10
def formNPY():
    X_train, y_train, X_test, y_test = getData()
    np.save("test_data/test_data.npy", X_test)
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    attacks = generate(X_test, (10000, 28, 28, 1))
    end = time.perf_counter()
    print('Time to generate 10k adversarial examples: %s' % (end - start))
    np.save("attack_data/attack_data.npy", attacks)
Example #11
def gen_hyp_data(model, N, text_len=500):
    texts, hiddens, hyps = [], [], []
    for i in range(N):
        text, hidden = generate(model, '\n\n', text_len, temperature, True)
        hidden = hidden.reshape(hidden.shape[0], -1)
        hyp = hypothesis(text)
        hiddens.append(hidden)
        hyps.append(hyp)
        texts.append(text)
    return ''.join(texts), np.concatenate(hyps), np.concatenate(hiddens)
Example #12
def face_handler(unused_addr, args):
  key_ = np.random.choice(keyword)
  print(key_)
  data = generate(key_, 0.9)
  jdata = json.loads(data)

  plist = jdata['_poem']
  sendMessage(plist[0:4], client_ip[0], pdelay, pfadein*4, pshow + pfadein*3, pfadeout)
  sendMessage(plist[4:6], client_ip[1], pdelay + pfadein*4, pfadein*2, pshow + pfadein*1, pfadeout)
  sendMessage(plist[6:], client_ip[2], pdelay + pfadein*6, pfadein, pshow, pfadeout)
Example #13
def test():
    datas = np.load("tang.npz")
    ix2word = datas['ix2word'].item()
    word2ix = datas['word2ix'].item()
    model = PoetryModel(len(ix2word), config.embedding_dim, config.hidden_dim)
    model.load_state_dict(torch.load(config.model_path, config.device))
    while True:
        start_words = str(input())
        gen_poetry = ''.join(
            generate(model, start_words, ix2word, word2ix, config))
        print(gen_poetry)
Example #14
def main():
	k = 10
	g = generate(k)

	print("Graph parameters:\n")
	print("|V| = {}\n".format(len(g)))
	print("E = {}\n".format(g.edges()))
	print("|E| = {}\n".format(len(g.edges())))

	print("Algorithm: iterative compression with k = {}.".format(k))
	fvs, time = time_instance(g, k, alg=fvs_via_ic, n=1)

	print("\nDone!")
	print("FVS = {} with size {}.".format(fvs, len(fvs)))

	print("Computation took {:.3f} seconds.".format(time))
Example #15
def main():
    start = time.time()
    loss_avg = 0
    try:
        print("Training for %d epochs..." % args.n_epochs)
        for epoch in range(1, args.n_epochs + 1):
            loss = train(*random_training_set(args.chunk_len))
            loss_avg += loss

            if epoch % args.print_every == 0:
                print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
                print(generate(decoder, 'Wh', 1000), '\n')

        print("Saving...")
        save()

    except KeyboardInterrupt:
        print("Saving before quit...")
        save()
Example #16
def save(all_characters):
    save_filename = os.path.splitext(os.path.basename(
        args.filename))[0] + '.pt'
    torch.save(decoder, save_filename)
    print('Saved as %s' % save_filename)
    save_all_chars = os.path.splitext(os.path.basename(
        args.filename))[0] + '.chars'
    with open(save_all_chars, 'w') as out_all_chars:
        out_all_chars.write(all_characters)


try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in range(1, args.n_epochs + 1):
        loss = train(*random_training_set(args.chunk_len))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print(
                '[%s (%d %d%%) %.4f]' %
                (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(generate(decoder, all_characters, 'Wh', 100), '\n')

    print("Saving...")
    save(all_characters)

except KeyboardInterrupt:
    print("Saving before quit...")
    save(all_characters)
Example #17
        output, hidden = decoder(inp[c], hidden)
        loss += criterion(output, target[c])

    loss.backward()
    decoder_optimizer.step()

    return loss.data[0] / args.chunk_len  # use loss.item() on PyTorch >= 0.4

def save():
    save_filename = os.path.splitext(os.path.basename(args.filename))[0] + '.pt'
    torch.save(decoder, save_filename)
    print('Saved as %s' % save_filename)

try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in range(1, args.n_epochs + 1):
        loss = train(*random_training_set(args.chunk_len))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(generate(decoder, 'Wh', 100), '\n')

    print("Saving...")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()

Example #18
                                     lr=args.learning_rate)
criterion = nn.CrossEntropyLoss()

if args.cuda:
    decoder.cuda()

start = time.time()
all_losses = []
loss_avg = 0

try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in tqdm(range(1, args.n_epochs + 1)):
        loss = train(*random_training_set(
            args.chunk_len,
            args.batch_size))  # TODO: do we want to randomly select? A: Kinda
        loss_avg += loss

        if epoch % args.print_every == 0:
            print(
                '[%s (%d %d%%) %.4f]' %
                (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(generate(decoder, all_words, None, 20, cuda=args.cuda), '\n')

    print("Saving...")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()
Example #19
opt = hpddm.optionGet()
args = ctypes.create_string_buffer(' '.join(sys.argv[1:]).encode('ascii', 'ignore'))
hpddm.optionParse(opt, args, rankWorld == 0)
def appArgs():
    val = (ctypes.c_char_p * 4)()
    (val[0], val[1], val[2], val[3]) = [ b'Nx=<100>', b'Ny=<100>', b'overlap=<1>', b'generate_random_rhs=<0>' ]
    desc = (ctypes.c_char_p * 4)()
    (desc[0], desc[1], desc[2], desc[3]) = [ b'Number of grid points in the x-direction.', b'Number of grid points in the y-direction.', b'Number of grid points in the overlap.', b'Number of generated random right-hand sides.' ]
    hpddm.optionParseInts(opt, args, 4, ctypes.cast(val, ctypes.POINTER(ctypes.c_char_p)), ctypes.cast(desc, ctypes.POINTER(ctypes.c_char_p)))
    (val[0], val[1]) = [ b'symmetric_csr=(0|1)', b'nonuniform=(0|1)' ]
    (desc[0], desc[1]) = [ b'Assemble symmetric matrices.', b'Use a different number of eigenpairs to compute on each subdomain.' ]
    hpddm.optionParseArgs(opt, args, 2, ctypes.cast(val, ctypes.POINTER(ctypes.c_char_p)), ctypes.cast(desc, ctypes.POINTER(ctypes.c_char_p)))
    val = None
    desc = None
appArgs()
o, connectivity, dof, Mat, MatNeumann, d, f, sol, mu = generate(rankWorld, sizeWorld)
status = 0
if sizeWorld > 1:
    A = hpddm.schwarzCreate(Mat, o, connectivity)
    hpddm.schwarzMultiplicityScaling(A, d)
    hpddm.schwarzInitialize(A, d)
    if mu != 0:
        hpddm.schwarzScaledExchange(A, f)
    else:
        mu = 1
    if hpddm.optionSet(opt, b'schwarz_coarse_correction'):
        nu = ctypes.c_ushort(int(hpddm.optionVal(opt, b'geneo_nu')))
        if nu.value > 0:
            if hpddm.optionApp(opt, b'nonuniform'):
                nu.value += max(int(-hpddm.optionVal(opt, b'geneo_nu') + 1), (-1)**rankWorld * rankWorld)
            threshold = hpddm.underlying(max(0, hpddm.optionVal(opt, b'geneo_threshold')))
Example #20
        if name == '--path':
            path = val
        if name == '--sentence_size':
            sentence_size = int(val)
        if name == '--generate':
            generate_flag = True

    if path == '':
        print('Please input data path!')
        exit(0)
    if sentence_size == 0 and train_flag == False:
        print('Sentence Size Invalid!')
        exit(0)
        
    if train_flag == True:
        train(path)

    if generate_flag == True:
        device = torch.device("cuda")
        word_dict, train_data = pre_process(path)
        dict_size = len(word_dict)
        
        model = PoetryModel(dict_size, Utils.embedding_dim, Utils.hidden_dim)
        model.to(device)
        model.load_state_dict(torch.load('model/poem.pth', 'cuda'))

        poetry = ''.join(generate(model, '日', word_dict, sentence_size - 1, 4))
        print(poetry)

    exit(0)
Example #21
            if args_condition(args.print_every,steps) or steps == 1:
                est_time =  ((args.n_epochs * iters) - steps) * smooth_time_iter / 60 / 60
                print('epoch %d/%d, iter %d/%d, loss: %.6f, time/iter: %.4f, time elapsed: %s, est: %.2fh'
                    % (epoch, args.n_epochs, its, iters, smooth_loss, smooth_time_iter, time_since(start), est_time))
                #print('epoch %d/%d, iter %d/%d, loss: %.6f, time/iter: %.4f, time elapsed: %s'
                #    % (epoch, args.n_epochs, its, iters, smooth_loss, smooth_time_iter, time_since(start)))
            if args_condition(args.log_every,steps) or steps == 1:
                with open(log_filename, 'a') as lg:
                    lg.write('%d,%.6f,%.6f,%.1f\n' % (steps, loss_avg / args.log_every, smooth_loss, time.time() - start))
                    #lg.write(str(steps) + ',' + str(loss_avg / args.log_every) + ',' + str(smooth_loss) + ',' + 
                    #    str(time.time() - start) + '\n')
                loss_avg = 0
            if args_condition(args.save_every,steps): save(epoch,its)
            if args_condition(args.sample_every,steps):
                try:
                    sample = generate(decoder, random.choice(string.ascii_letters), args.chunk_len*8)
                    print(sample, '\n')
                except RuntimeError:
                    sample = ''
                    pass
            if args_condition(args.log_sample,steps) or steps == 1:
                try:
                    sample = generate(decoder, random.choice(string.ascii_letters), args.chunk_len*8)
                    with open(sample_filename, 'a') as sp:
                        sp.write('=====\nsteps ' + str(steps) + ', loss: ' + str(smooth_loss) +
                            '\n=====\n' + sample + '\n\n')
                except RuntimeError: pass
    save()

except KeyboardInterrupt:
    save()
Example #22
sys.path.insert(0, './bin') #adds HTSOHM modules to Python


number_of_atom_types = 4
number_of_materials = 50

bins = 5
mutation_strength = 0.2




####
# seed population

generate(number_of_materials,                         # create seed population
         number_of_atom_types)



screen(HTSOHM_dir, 'gen0', 0, number_of_materials)    # screen seed

# WAIT FOR JOBS

prep_gen0(HTSOHM_dir, number_of_materials)            # collect output
find_missing('gen0')                                  # find missing data points




####
# first generation
Example #23
decoder_optimizer = torch.optim.Adam(decoder.parameters(),
                                     lr=args.learning_rate)
criterion = nn.CrossEntropyLoss()

if args.cuda:
    decoder.cuda()

start = time.time()
all_losses = []
loss_avg = 0

try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in tqdm(range(1, args.n_epochs + 1)):
        loss = train(*random_training_set(args.chunk_len, args.batch_size))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print(
                '[%s (%d %d%%) %.4f]' %
                (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(generate(decoder, all_characters, 'Wh', 100, cuda=args.cuda),
                  '\n')

    print("Saving...")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()
Example #24
                                     lr=args.learning_rate)
criterion = nn.CrossEntropyLoss()

if args.cuda:
    decoder.cuda()

start = time.time()
all_losses = []
loss_avg = 0

try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in tqdm(range(1, args.n_epochs + 1)):
        loss = train(*random_training_set(args.chunk_len, args.batch_size))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print(
                '[%s (%d %d%%) %.4f]' %
                (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(
                generate(decoder, 119, 0.9, args.test_prompt, cuda=args.cuda),
                '\n')

    print("Saving...")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()
Example #25
    training_inds = suitable_indices  # TODO implement bottleneck

    for epoch in range(1, args.n_epochs + 1):
        # TODO epochs != iterations
        # TODO make sure ALL training_inds are covered (to avoid inevitable learning bottleneck).
        loss = train(*random_training_set(training_inds, args.batch_size))
        loss_avg += loss

        if epoch % args.print_every == 0:

            preview_inds = training_inds[0:100]

            # TODO Go through preview inds in batches in case it's too many.
            predictions = generate(
                decoder, preview_inds, predict_len=20,
                cuda=not args.no_cuda)  # temperature=args.gen_temp,
            predictions = [pred[1:].split('<')[0] for pred in predictions]
            str_sims = []
            sem_sims = []
            com_accs = []

            for i, pred in zip(preview_inds, predictions):
                str_sim = 1.0 - (distance.levenshtein(idx_to_word[i], pred) /
                                 max(len(idx_to_word[i]), len(pred)))

                if str_sim == 1:
                    sem_sim = 1
                else:
                    j = word_to_idx.get(pred)
                    if j is None:
Example #26
def sna():
    lst = request.args.get('lst')
    ret = json.dumps(generate(lst))
    return ret
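
A minimal sketch of calling this route from Python; the host, port, and `lst` value are assumptions, since the route simply forwards `lst` to generate() and returns the JSON-encoded result:

import requests

resp = requests.get("http://localhost:5000/sna", params={"lst": "example"})
print(resp.json())  # the value produced by generate(lst), decoded from JSON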
Example #27
        output, hidden = decoder(inp[c], hidden)
        loss += criterion(output.view(-1), target[c])

    loss.backward()
    decoder_optimizer.step()

    return loss.data[0] / args.chunk_len

def save():
    save_filename = os.path.splitext(os.path.basename(args.filename))[0] + '.pt'
    torch.save(decoder, save_filename)
    print('Saved as %s' % save_filename)

try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in range(1, args.n_epochs + 1):
        loss = train(*random_training_set(args.chunk_len))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(generate(decoder, 'Wh', 100), '\n')

    print("Saving...")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()

Example #28

charnet = CharRNN(n_chars, args.hidden_size, n_chars, args.model,
                  args.n_layers)
optimizer = torch.optim.Adam(charnet.parameters(), lr=args.learning_rate)
criterion = torch.nn.CrossEntropyLoss()

if args.cuda:
    charnet.cuda()

start = time.time()
all_losses = []
loss_avg = 0

try:

    for epoch in range(1, args.epochs + 1):
        loss = train(*train_set(args.seq_length, args.batch_size))
        loss_avg += loss

        if epoch % args.printtime == 0:
            print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch,
                                           epoch / args.epochs * 100, loss))
            print(generate(charnet, 'a', 100, cuda=args.cuda), '\n')

    print("Saving...")
    save_model()

except KeyboardInterrupt:
    print("backing up...")
    save_model()  # indented so the backup save runs only on interrupt
Example #29
                              fromlist=[modnames[-1]]).__dict__[modnames[-1]]
            funcs[func] = getattr(file, func)
        except AttributeError as error:
            print(
                f"No '{func}' function found in --audioreactive_file, using default..."
            )
            funcs[func] = None
        except:
            if funcs.get(func, "error") == "error":
                print("Error while loading --audioreactive_file...")
                traceback.print_exc()
                exit(1)

    # override with args from the OVERRIDE dict in the specified file
    arg_dict = vars(args).copy()
    try:
        file = __import__(".".join(modnames[:-1]),
                          fromlist=[modnames[-1]]).__dict__[modnames[-1]]
        for arg, val in getattr(file, "OVERRIDE").items():
            arg_dict[arg] = val
            setattr(args, arg, val)
    except:
        pass  # no overrides, just continue

    ckpt = arg_dict.pop("ckpt", None)
    audio_file = arg_dict.pop("audio_file", None)

    # splat kwargs to function call
    # (generate() has all kwarg defaults specified again to make it amenable to ipynb usage)
    generate(ckpt=ckpt, audio_file=audio_file, **funcs, **arg_dict, args=args)
Example #30
    return loss.data[0] / args.chunk_len


def save():
    save_filename = os.path.splitext(os.path.basename(
        args.filename))[0] + '.pt'
    torch.save(decoder, save_filename)
    print('Saved as %s' % save_filename)


try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in range(1, args.n_epochs + 1):
        loss = train(*random_training_set(args.chunk_len))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print(
                '[%s (%d %d%%) %.4f]' %
                (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            start_char = lang.index2char[random.randint(
                0, len(lang.index2char) - 1)]  # randint is inclusive at both ends
            print(generate(lang, decoder, start_char, 100), '\n')

    print("Saving...")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()
Example #31
import os
import sys
sys.path.append(os.getcwd())
import time
import numpy as np
import nn
from csvWrap import *
from filt import *
import extract
from nn import printInfo
from generate import *

#task='xor'
task = 'linearxor'
#task='csv'
#task='mnist'
[inp, out] = generate(task)

INPUTS = len(inp[0])
OUTPUTS = len(out[0])

BATCHSIZE = 1
#raise Exception
#LAYERDIM=[2,1025,2]
#LAYERDIM=[2,500,10,2]
LAYERDIM = [INPUTS, 500, OUTPUTS]
EPOCHS = 100
GAMMA = 0.005
PRINTFREQ = BATCHSIZE

nExamples = len(inp)
Example #32
)
decoder_optimizer = torch.optim.Adam(decoder.parameters(),
                                     lr=args.learning_rate)
criterion = nn.CrossEntropyLoss()

if args.cuda:
    decoder.cuda()

start = time.time()
all_losses = []
loss_avg = 0

try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in tqdm(range(1, args.n_epochs + 1)):
        loss = train(*random_training_set(args.chunk_len, args.batch_size))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print(
                '[%s (%d %d%%) %.4f]' %
                (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(generate(decoder, 'Wh', 100, cuda=args.cuda), '\n')

    print("Saving...")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()
Example #33
opt = hpddm.optionGet()
args = ctypes.create_string_buffer(' '.join(sys.argv[1:]).encode('ascii', 'ignore'))
hpddm.optionParse(opt, args, rankWorld == 0)
def appArgs():
    val = (ctypes.c_char_p * 4)()
    (val[0], val[1], val[2], val[3]) = [ b'Nx=<100>', b'Ny=<100>', b'overlap=<1>', b'generate_random_rhs=<0>' ]
    desc = (ctypes.c_char_p * 4)()
    (desc[0], desc[1], desc[2], desc[3]) = [ b'Number of grid points in the x-direction.', b'Number of grid points in the y-direction.', b'Number of grid points in the overlap.', b'Number of generated random right-hand sides.' ]
    hpddm.optionParseInts(opt, args, 4, ctypes.cast(val, ctypes.POINTER(ctypes.c_char_p)), ctypes.cast(desc, ctypes.POINTER(ctypes.c_char_p)))
    (val[0], val[1]) = [ b'symmetric_csr=(0|1)', b'nonuniform=(0|1)' ]
    (desc[0], desc[1]) = [ b'Assemble symmetric matrices.', b'Use a different number of eigenpairs to compute on each subdomain.' ]
    hpddm.optionParseArgs(opt, args, 2, ctypes.cast(val, ctypes.POINTER(ctypes.c_char_p)), ctypes.cast(desc, ctypes.POINTER(ctypes.c_char_p)))
    val = None
    desc = None
appArgs()
o, connectivity, dof, Mat, MatNeumann, d, f, sol, mu = generate(rankWorld, sizeWorld)
status = 0
if sizeWorld > 1:
    A = hpddm.schwarzCreate(Mat, o, connectivity)
    hpddm.schwarzMultiplicityScaling(A, d)
    hpddm.schwarzInitialize(A, d)
    if mu != 0:
        hpddm.schwarzScaledExchange(A, f)
    else:
        mu = 1
    if hpddm.optionSet(opt, b'schwarz_coarse_correction'):
        addr = hpddm.optionAddr(opt, b'geneo_nu')
        nu = ctypes.c_ushort(int(addr.contents.value))
        if nu.value > 0:
            if hpddm.optionApp(opt, b'nonuniform'):
                addr.contents.value += max(int(-addr.contents.value + 1), (-1)**rankWorld * rankWorld)
Example #34
    loss.backward()
    decoder_optimizer.step()

    return loss.data[0] / args.chunk_len


def save():
    save_filename = os.path.splitext(os.path.basename(
        args.filename))[0] + '.pt'
    torch.save(decoder, save_filename)
    print('Saved as %s' % save_filename)


try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in range(1, args.n_epochs + 1):
        loss = train(*random_training_set(args.chunk_len))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print(
                '[%s (%d %d%%) %.4f]' %
                (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(generate(decoder, u'你', 100), '\n')

    print("Saving....")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()
Example #35
decoder_optimizer = torch.optim.AdamW(decoder.parameters(),
                                      lr=args.learning_rate)
criterion = nn.CrossEntropyLoss()

if args.cuda:
    decoder.cuda()

start = time.time()
all_losses = []
loss_avg = 0

try:
    print("Training for %d steps..." % args.n_epochs)
    for epoch in tqdm(range(1, args.n_epochs + 1)):
        loss = train(*random_training_set(args.chunk_len, args.batch_size))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print(
                '[%s (%d %d%%) %.4f]' %
                (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(generate(decoder, '\n', 256, cuda=args.cuda), '\n')

    print("Saving...")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()
Example #36
start = time.time()
all_losses = []
loss_avg = 0

try:
    print("Training for %d epochs..." % args.n_epochs)
    for epoch in tqdm(range(1, args.n_epochs + 1)):
        loss = train(
            *random_training_set(args.chunk_len, args.batch_size, word_model))
        loss_avg += loss

        if epoch % args.print_every == 0:
            print(
                '[%s (%d %d%%) %.4f]' %
                (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
            print(
                generate(decoder,
                         word_model,
                         args.p,
                         priming_str[0],
                         100,
                         cuda=args.cuda), '\n')

    print("Saving...")
    save()

except KeyboardInterrupt:
    print("Saving before quit...")
    save()
Example #37
from generate import *
import cv2
import os
from PIL import Image

listimgs = os.listdir('frames/')  # only the count is used; frames are assumed to be named frame0.jpg ... frame<N-1>.jpg


for ind in range(len(listimgs)):
    x = generate('frames/frame%i.jpg' % (ind))
    x.save('framesConverted/frame%i.jpg' % (ind))
    print(ind)