Example #1
input_sequences = sequences[:, :-1]
target_sequences = sequences[:, 1:]

sample_level_outputs, new_h0 = sample_level_rnn(input_sequences, h0, reset)

cost = T.nnet.categorical_crossentropy(
    T.nnet.softmax(sample_level_outputs.reshape((-1, Q_LEVELS))),
    target_sequences.flatten()
).mean()

# By default we report cross-entropy cost in bits. 
# Switch to nats by commenting out this line:
cost = cost * lib.floatX(1.44269504089)
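# (categorical_crossentropy is measured in nats; multiplying by
# 1/ln(2) ~= 1.44269504089 converts to bits, since log2(x) = ln(x)/ln(2).)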

params = lib.search(cost, lambda x: hasattr(x, 'param'))
lib._train.print_params_info(cost, params)

grads = T.grad(cost, wrt=params, disconnected_inputs='warn')
grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads]

updates = lasagne.updates.adam(grads, params)

train_fn = theano.function(
    [sequences, h0, reset],
    [cost, new_h0],
    updates=updates,
    on_unused_input='warn'
)

generate_outputs, generate_new_h0 = sample_level_rnn(sequences, h0, reset)
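
A minimal sketch of how this train_fn might be driven across minibatches, carrying the recurrent state forward; the shapes, constants, and the dummy feed_data generator below are assumptions for illustration, not part of the original script:

import numpy as np

BATCH_SIZE, SEQ_LEN, N_GRUS, DIM, Q_LEVELS = 8, 64, 1, 32, 256  # assumed values

def feed_data(n_batches=3):
    # Dummy minibatch source standing in for the real data loader.
    for i in range(n_batches):
        seqs = np.random.randint(
            0, Q_LEVELS, size=(BATCH_SIZE, SEQ_LEN + 1)).astype('int32')
        yield seqs, np.int32(i == 0)  # reset the hidden state on the first batch

h0_np = np.zeros((BATCH_SIZE, N_GRUS, DIM), dtype='float32')
for i, (seqs, reset_flag) in enumerate(feed_data()):
    cost_np, h0_np = train_fn(seqs, h0_np, reset_flag)
    print("iter {}: cost {:.4f} bits".format(i, float(cost_np)))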
Example #2
        from lib.pyramid import youtubedl
    except Exception:
        xbmc.executebuiltin('XBMC.Notification(ThePyramid,Please [COLOR yellow]install the Youtube Addon[/COLOR] module,10000,"")')
    stream_url=youtubedl.single_YD(url)
    from lib.pyramid import pyramid;pyramid.playsetresolved(stream_url,name,iconimage)
elif mode==1119:from lib.pyramid import pyramid;pyramid.playsetresolved(pyramid.urlsolver(url),name,iconimage,True)
elif mode==1121:from lib.pyramid import pyramid;pyramid.ytdl_download('',name,'video')
elif mode==1123:from lib.pyramid import pyramid;pyramid.ytdl_download(url,name,'video') 
elif mode==1124:from lib.pyramid import pyramid;pyramid.ytdl_download(url,name,'audio')
elif mode==1125:from lib.pyramid import pyramid;pyramid.search(url);xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==1126:
    name = name.split(':')
    from lib.pyramid import pyramid;pyramid.search(url,search_term=name[1])
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1127:
    from lib.pyramid import pyramid;pulsarIMDB = pyramid.search(url)
    xbmc.Player().play(pulsarIMDB)
elif mode == 1130:from lib.pyramid import pyramid;pyramid.GetSublinks(name,url,iconimage,fanart)	
elif mode == 1140:from lib.pyramid import pyramid;pyramid.SearchChannels();pyramid.SetViewThumbnail();xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1141 : from lib.pyramid import pyramid;pyramid.Search_input(url);xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1142: from lib.pyramid import pyramid;pyramid.RESOLVE(url)	
elif mode == 1153:from lib.pyramid import pyramid;pyramid.pluginquerybyJSON(url);xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1200: from lib.freeview import freeview;freeview.CATEGORIES()
elif mode == 1201: from lib.freeview import freeview;freeview.play(url)
elif mode == 1202: from lib.freeview import freeview;freeview.tvplayer(url)
elif mode == 1300: from lib import apprentice;apprentice.apprentice_Main()
elif mode == 1301 : from lib import apprentice;apprentice.Mov_Menu()
elif mode == 1302 : from lib import apprentice;apprentice.Tv_Menu()
elif mode == 1303 : from lib import apprentice;apprentice.Second_Menu(url)
elif mode == 1304 : from lib import apprentice;apprentice.Index_List_Mov()
elif mode == 1305 : from lib import apprentice;apprentice.Main_Loop(url)
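
Each branch above dispatches on an integer mode decoded from the plugin URL. A minimal sketch of the parameter parsing such a dispatcher assumes (the parameter names follow common Kodi addon conventions and are not confirmed by this excerpt):

import sys
import urlparse  # Python 2; use urllib.parse on Python 3

# Kodi invokes the addon as: <plugin-url> <handle> <query-string>
params = dict(urlparse.parse_qsl(sys.argv[2].lstrip('?')))
mode = int(params.get('mode', '0'))
url = params.get('url', '')
name = params.get('name', '')
iconimage = params.get('iconimage', '')
fanart = params.get('fanart', '')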
Example #3
                test_mask
            )
        valid_costs.append(valid_loss)
    print "Validation Completed! cost: {} time: {}".format(np.mean(np.asarray(valid_costs),axis=0),time.time()-start)
    return np.mean(np.asarray(valid_costs),axis=0)[1]

if __name__=='__main__':
    batch_size = T.shape(X)[0]
    # readout = DurationPredictor(X,drop_prob)
    # readout = RecurrentPredictor(X,drop_prob,mask=mask)
    readout = DeepVoice(X,drop_prob)
    batch_size = T.shape(readout)[0]
    cost = T.sum(T.sqr(readout-Y)*mask)/T.sum(mask)
    abs_cost = T.sum(T.abs_(readout-Y)*mask)/T.sum(mask)

    params = lib.search(cost, lambda x: hasattr(x, "param"))
    lib.print_params_info(params)
    grads = T.grad(cost, wrt=params, disconnected_inputs='warn')
    grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads]

    print "Gradients Computed"
    updates = lasagne.updates.adam(grads, params, learning_rate=learn_rate)

    train_fn = theano.function(
        [drop_prob,X,Y,mask,learn_rate],
        [cost,abs_cost],
        updates=updates,
        on_unused_input='warn'
    )

    test_fn = theano.function(
Example #4
    mode='open-loop-rnn'
)

readout = lib.ops.Linear(
    'Generator.GRU.Output.MLP.1',
    T.concatenate([state[:,:,-1],tiled_speaker],-1),
    DEC_DIM+SPEAKER_DIM,
    OUTPUT_DIM
)

mask_mult = T.shape_padright(mask)

cost = T.sum(T.sqr(X-readout)*mask_mult)/(T.sum(mask)*63.)
test_cost = T.sum(T.sqr(X-predict_readout)*T.shape_padright(mask))/(T.sum(mask)*63.)

params = lib.search(cost, lambda x: hasattr(x, "param") and x.param==True)
lib.print_params_info(params)

grads = T.grad(cost, wrt=params, disconnected_inputs='warn')
grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads]

print "Gradients Computed"

updates = lasagne.updates.adam(grads, params, learning_rate=learn_rate)

train_fn = theano.function(
    [noise_vocoder,X,spkr_ids,ctx,mask,learn_rate],
    cost,
    updates=updates,
    on_unused_input='warn'
)
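
The training cost above is a masked mean squared error, normalized by the number of valid timesteps times the 63 output dimensions. A NumPy sketch of the same computation, with hypothetical shapes:

import numpy as np

B, T_, F = 4, 10, 63  # batch, timesteps, features (63 matches the divisor above)
X = np.random.randn(B, T_, F).astype('float32')
readout = np.random.randn(B, T_, F).astype('float32')
lengths = np.array([10, 7, 5, 3])
mask = (np.arange(T_)[None, :] < lengths[:, None]).astype('float32')  # (B, T_)

mask_mult = mask[:, :, None]  # shape_padright: broadcast the mask over features
cost = np.sum((X - readout) ** 2 * mask_mult) / (np.sum(mask) * 63.)
print(cost)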
Example #5
    pyramid.ytdl_download(url, name, 'video')
elif mode == 1124:
    from lib.pyramid import pyramid
    pyramid.ytdl_download(url, name, 'audio')
elif mode == 1125:
    from lib.pyramid import pyramid
    pyramid.search(url)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1126:
    name = name.split(':')
    from lib.pyramid import pyramid
    pyramid.search(url, search_term=name[1])
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1127:
    from lib.pyramid import pyramid
    pulsarIMDB = pyramid.search(url)
    xbmc.Player().play(pulsarIMDB)
elif mode == 1130:
    from lib.pyramid import pyramid
    pyramid.GetSublinks(name, url, iconimage, fanart)
elif mode == 1140:
    from lib.pyramid import pyramid
    pyramid.SearchChannels()
    pyramid.SetViewThumbnail()
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1141:
    from lib.pyramid import pyramid
    pyramid.Search_input(url)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1142:
    from lib.pyramid import pyramid
Example #6
	#				f = search(gp*100+user, doc, rank, th, 100*(i+1), beta=0.2)
	#				hit = 0
	#				for u in f:
	#					if u[2] == gp: hit += 1
	#				hr[i].append(hit)
	#			th += 0.05 
	#		w.writerows(hr)
	#		print "insert result successfully..."
	
	# Test the metric: for each user, count same-group hits within the top 100*(p+1) results.
	w = csv.writer(open('metric_sc.csv', 'wb'), delimiter=',')
	for gp in range(10):
		for user in range(100): # every user.
			hr = [0]*10
			for p in range(10):
				f = search(gp*100+user, doc, rank, 0, 100*(p+1), beta=0.2)
				hit = 0
				for u in f:
					if u[2] == gp:
						hit += 1
				hr[p] = hit
			w.writerow(hr)

	#hr = [0]*10
	#for cl in range(10):
	#	for i in range(10):
	#		f = search(cl*100+23, doc, rank, 0, 100*(i+1))
	#		hit = 0
	#		for u in f:
	#			if u[2] == cl:
	#				hit += 1
Example #7
File: app.py  Project: taybenlor/bikka
def search_handler(response):
    query = response.get_field('query')
    data = {
        'isloggedin': isloggedin(response),
        'results': search(query),
        'query': query,
        'username': currentuser(response).username
    }
    response.write(template.render_file("templates/search_result.html", data))
Example #8
if __name__ == '__main__':
    rec = Recognizer(X, 0.)
    h = rec['CNN.1']
    # readout = RecurrentMapper(ctx)
    readout = ConvolutionalMapper(ctx)
    # predict_readout = create_graph(X_concat,mode='test')
    predict_readout = readout

    mask_mult = T.shape_padright(mask)
    cost = T.sum(T.sqr(readout - h) * mask_mult) / (T.sum(mask_mult))
    test_cost = T.sum(
        T.sqr(predict_readout - h) * mask_mult) / (T.sum(mask_mult))

    params = lib.search(
        cost, lambda x: hasattr(x, "param") and x.param == True and
        'Mapper.Generator' in x.name)
    lib.print_params_info(params)
    grads = T.grad(cost, wrt=params, disconnected_inputs='warn')

    print "Gradients Computed"

    updates = lasagne.updates.adam(grads, params, learning_rate=learn_rate)
    # for x,y in lib._updates.iteritems():
    #     if 'Generator' in x.name:
    #         updates[x] = y
    #         print "Adding update: ",x.name

    train_fn = theano.function([X, ctx, mask, learn_rate],
                               cost,
                               updates=updates,
                               on_unused_input='warn')
Example #9
def train_loop(inputs,
               cost,
               train_data,
               times,
               prints=None,
               inject_total_iters=False,
               test_data=None,
               callback=None,
               optimizer=lasagne.updates.adam,
               save_params=False,
               nan_guard=False):

    params = lib.search(cost, lambda x: hasattr(x, 'param'))
    lib.print_params_info(params)

    grads = T.grad(cost, wrt=params, disconnected_inputs='warn')

    grads = [T.clip(g, lib.floatX(-1), lib.floatX(1)) for g in grads]

    updates = optimizer(grads, params)

    if prints is None:
        prints = [('cost', cost)]
    else:
        prints = [('cost', cost)] + prints

    print "Compiling train function..."
    if nan_guard:
        from theano.compile.nanguardmode import NanGuardMode
        mode = NanGuardMode(nan_is_error=True,
                            inf_is_error=True,
                            big_is_error=True)
    else:
        mode = None
    train_fn = theano.function(inputs, [p[1] for p in prints],
                               updates=updates,
                               on_unused_input='warn',
                               mode=mode)

    print "Compiling eval function..."
    eval_fn = theano.function(inputs, [p[1] for p in prints],
                              on_unused_input='warn')

    print "Training!"

    total_iters = 0
    total_seconds = 0.
    last_print = 0
    last_gen = 0

    if len(times) >= 4:
        gen_every = times[3]
    else:
        gen_every = times[1]

    if len(times) >= 5:
        early_stop = times[4]
        if len(times) >= 6:
            early_stop_min = times[5]
        else:
            early_stop_min = 0
    else:
        early_stop = None
        early_stop_min = None

    best_test_cost = np.inf
    best_test_cost_iter = 0.

    all_outputs = []
    all_stats = []
    for epoch in itertools.count():

        generator = train_data()
        while True:
            try:
                inputs = next(generator)
            except StopIteration:
                break

            if inject_total_iters:
                inputs = [np.int32(total_iters)] + list(inputs)

            start_time = time.time()
            outputs = train_fn(*inputs)
            total_seconds += time.time() - start_time
            total_iters += 1

            all_outputs.append(outputs)

            if total_iters == 1:
                try:  # This only matters on Ishaan's computer
                    import experiment_tools
                    experiment_tools.register_crash_notifier()
                except ImportError:
                    pass

            if (times[0]=='iters' and total_iters-last_print == times[1]) or \
                (times[0]=='seconds' and total_seconds-last_print >= times[1]):

                mean_outputs = np.array(all_outputs).mean(axis=0)

                if test_data is not None:
                    if inject_total_iters:
                        test_outputs = [
                            eval_fn(np.int32(total_iters), *inputs)
                            for inputs in test_data()
                        ]
                    else:
                        test_outputs = [
                            eval_fn(*inputs) for inputs in test_data()
                        ]
                    test_mean_outputs = np.array(test_outputs).mean(axis=0)

                stats = collections.OrderedDict()
                stats['epoch'] = epoch
                stats['iters'] = total_iters
                for i, p in enumerate(prints):
                    stats['train ' + p[0]] = mean_outputs[i]
                if test_data is not None:
                    for i, p in enumerate(prints):
                        stats['test ' + p[0]] = test_mean_outputs[i]
                stats['secs'] = total_seconds
                stats['secs/iter'] = total_seconds / total_iters

                if test_data is not None and (stats['test cost'] < best_test_cost
                                              or
                                              (early_stop_min is not None
                                               and total_iters <= early_stop_min)):
                    best_test_cost = stats['test cost']
                    best_test_cost_iter = total_iters

                print_str = ""
                for k, v in stats.items():
                    if isinstance(v, int):
                        print_str += "{}:{}\t".format(k, v)
                    else:
                        print_str += "{}:{:.4f}\t".format(k, v)
                print print_str[:-1]  # omit the last \t

                all_stats.append(stats)

                all_outputs = []
                last_print += times[1]

            if (times[0]=='iters' and total_iters-last_gen==gen_every) or \
                (times[0]=='seconds' and total_seconds-last_gen >= gen_every):
                tag = "iters{}_time{}".format(total_iters, total_seconds)
                if callback is not None:
                    callback(tag)
                if save_params:
                    lib.save_params('params_{}.pkl'.format(tag))

                last_gen += gen_every

            early_stopped = (test_data is not None and early_stop is not None
                             and total_iters > 3 * early_stop
                             and (total_iters - best_test_cost_iter) > early_stop)
            if (times[0] == 'iters' and total_iters == times[2]) or \
                    (times[0] == 'seconds' and total_seconds >= times[2]) or \
                    early_stopped:

                if early_stopped:
                    print "Early stop! Best test cost was {} at iter {}".format(
                        best_test_cost, best_test_cost_iter)

                print "Done!"

                try:  # This only matters on Ishaan's computer
                    import experiment_tools
                    experiment_tools.send_sms("done!")
                except ImportError:
                    pass

                return all_stats
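
A hypothetical invocation of this loop, assuming the symbolic inputs and cost graph from the earlier snippets plus data_feeder/test_feeder callables that return minibatch generators; the schedule values are illustrative only:

stats = train_loop(
    inputs=[sequences, h0, reset],     # symbolic variables feeding the graph
    cost=cost,
    train_data=data_feeder,
    times=('iters', 100, 10000, 500),  # print every 100 iters, stop at 10000,
                                       # run the callback/save every 500
    test_data=test_feeder,
    save_params=True,
)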
Example #10
sample_level_outputs = sample_level_predictor(
    frame_level_outputs.reshape((BATCH_SIZE * SEQ_LEN, DIM)), prev_samples)

cost = T.nnet.categorical_crossentropy(T.nnet.softmax(sample_level_outputs),
                                       target_sequences.flatten()).mean()

# By default we report cross-entropy cost in bits.
# Switch to nats by commenting out this line:
cost = cost * lib.floatX(1.44269504089)

ip_cost = lib.floatX(1.44269504089) * T.nnet.categorical_crossentropy(
    T.nnet.softmax(big_frame_independent_preds.reshape(
        (-1, Q_LEVELS))), target_sequences.flatten()).mean()

all_params = lib.search(cost, lambda x: hasattr(x, 'param'))
ip_params = lib.search(
    ip_cost, lambda x: hasattr(x, 'param') and 'BigFrameLevel' in x.name)
other_params = [p for p in all_params if p not in ip_params]
all_params = ip_params + other_params
lib._train.print_params_info(ip_cost, ip_params)
lib._train.print_params_info(cost, other_params)
lib._train.print_params_info(cost, all_params)
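# (The ip_* objective trains only the 'BigFrameLevel' tier on its independent
# predictions, while other_params train on the full cost; keeping the two
# parameter lists separate lets the gradient sets below be applied separately.)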

ip_grads = T.grad(ip_cost, wrt=ip_params, disconnected_inputs='warn')
ip_grads = [
    T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in ip_grads
]

other_grads = T.grad(cost, wrt=other_params, disconnected_inputs='warn')
other_grads = [
Example #11
def create_encoder_decoder():

    input_var = T.tensor3('input')
    input_var_normalised = (input_var - floatX(0.5))

    mu, log_square_sigma = Encoder(input_var_normalised)

    mu = lib.floatX(2.) * T.tanh(mu / lib.floatX(2.))

    sampled_z = gaussian_sampler(mu, log_square_sigma)

    reconstructed = Decoder(sampled_z)

    reconstruction_cost = T.nnet.binary_crossentropy(
        reconstructed.reshape((reconstructed.shape[0], -1)),
        input_var.reshape((input_var.shape[0], -1))).sum(axis=1)

    kl_cost = KL_with_standard_gaussian(mu, log_square_sigma)

    loss = T.mean(kl_cost + reconstruction_cost)

    params = lib.search(loss,
                        lambda x: hasattr(x, 'param') and x.param == True)
    lib.print_params_info(params)

    grads = T.grad(loss, wrt=params, disconnected_inputs='warn')
    grads = [T.clip(g, lib.floatX(-1.), lib.floatX(1.)) for g in grads]

    lr = T.scalar('lr')

    updates = lasagne.updates.adam(grads,
                                   params,
                                   learning_rate=lr,
                                   epsilon=EPS)

    generated_z = T.matrix('generated_z')

    generated_samples = Decoder(generated_z)

    print "Compiling functions ..."

    train_fn = theano.function(
        [input_var, lr],
        [
            loss,
            kl_cost.mean(),
            mu.min(),
            mu.max(), mu,
            sampled_z.min(),
            sampled_z.max()
        ],
        updates=updates,
        # mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
    )

    reconstruct_fn = theano.function([input_var], reconstructed)

    val_fn = theano.function(
        [input_var],
        [
            loss,
            kl_cost.mean(),
            mu.min(),
            mu.max(), mu,
            sampled_z.min(),
            sampled_z.max()
        ],
        # mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
    )

    generate_fn = theano.function([generated_z], generated_samples)

    encode_fn = theano.function([input_var], mu)

    return train_fn, val_fn, generate_fn, reconstruct_fn, encode_fn
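
A hypothetical driver for the returned functions; the batch shape, latent width, and learning rate below are illustrative assumptions:

import numpy as np

LATENT_DIM = 64  # assumed; must match the width the Decoder expects

train_fn, val_fn, generate_fn, reconstruct_fn, encode_fn = create_encoder_decoder()

batch = np.random.rand(16, 28, 28).astype('float32')  # input_var is a T.tensor3
outs = train_fn(batch, np.float32(1e-3))
print("loss {:.4f}  kl {:.4f}".format(float(outs[0]), float(outs[1])))

samples = generate_fn(np.random.randn(16, LATENT_DIM).astype('float32'))
recons = reconstruct_fn(batch)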
Example #12
def train_loop(
    inputs,
    cost,
    train_data,
    times,
    prints=None,
    inject_total_iters=False,
    test_data=None,
    callback=None,
    optimizer=lasagne.updates.adam,
    save_params=False,
    nan_guard=False
    ):

    params = lib.search(cost, lambda x: hasattr(x, 'param'))
    lib.print_params_info(params)

    grads = T.grad(cost, wrt=params, disconnected_inputs='warn')
    grads = [T.clip(g, lib.floatX(-1), lib.floatX(1)) for g in grads]

    updates = optimizer(grads, params)

    if prints is None:
        prints = [('cost', cost)]
    else:
        prints = [('cost', cost)] + prints

    print "Compiling train function..."
    if nan_guard:
        from theano.compile.nanguardmode import NanGuardMode
        mode = NanGuardMode(
            nan_is_error=True, 
            inf_is_error=True, 
            big_is_error=True
        )
    else:
        mode = None
    train_fn = theano.function(
        inputs,
        [p[1] for p in prints],
        updates=updates,
        on_unused_input='warn',
        mode=mode
    )

    print "Compiling eval function..."
    eval_fn = theano.function(
        inputs,
        [p[1] for p in prints],
        on_unused_input='warn'
    )

    print "Training!"
    total_iters = 0
    total_seconds = 0.
    last_print = 0
    all_outputs = []
    all_stats = []
    for epoch in itertools.count():

        for inputs in train_data():

            if inject_total_iters:
                inputs = [np.int32(total_iters)] + list(inputs)

            start_time = time.time()
            outputs = train_fn(*inputs)
            total_seconds += time.time() - start_time
            total_iters += 1

            all_outputs.append(outputs)

            if total_iters == 1:
                try: # This only matters on Ishaan's computer
                    import experiment_tools
                    experiment_tools.register_crash_notifier()
                except ImportError:
                    pass

            if (times[0]=='iters' and total_iters-last_print == times[1]) or \
                (times[0]=='seconds' and total_seconds-last_print >= times[1]):

                mean_outputs = np.array(all_outputs).mean(axis=0)

                if test_data is not None:
                    if inject_total_iters:
                        test_outputs = [
                            eval_fn(np.int32(total_iters), *inputs)
                            for inputs in test_data()
                        ]
                    else:
                        test_outputs = [
                            eval_fn(*inputs) 
                            for inputs in test_data()
                        ]
                    test_mean_outputs = np.array(test_outputs).mean(axis=0)

                stats = collections.OrderedDict()
                stats['epoch'] = epoch
                stats['iters'] = total_iters
                for i,p in enumerate(prints):
                    stats['train '+p[0]] = mean_outputs[i]
                if test_data is not None:
                    for i,p in enumerate(prints):
                        stats['test '+p[0]] = test_mean_outputs[i]
                stats['secs'] = total_seconds
                stats['secs/iter'] = total_seconds / total_iters

                print_str = ""
                for k,v in stats.items():
                    if isinstance(v, int):
                        print_str += "{}:{}\t".format(k,v)
                    else:
                        print_str += "{}:{:.4f}\t".format(k,v)
                print print_str[:-1] # omit the last \t

                all_stats.append(stats)

                tag = "iters{}_time{}".format(total_iters, total_seconds)
                if callback is not None:
                    callback(tag)
                if save_params:
                    lib.save_params('params_{}.pkl'.format(tag))

                all_outputs = []
                last_print += times[1]

            if (times[0]=='iters' and total_iters == times[2]) or \
                (times[0]=='seconds' and total_seconds >= times[2]):

                print "Done!"

                try: # This only matters on Ishaan's computer
                    import experiment_tools
                    experiment_tools.send_sms("done!")
                except ImportError:
                    pass

                return all_stats
Example #13
cost = T.nnet.categorical_crossentropy(
    T.nnet.softmax(sample_level_outputs),
    target_sequences.flatten()
).mean()

# By default we report cross-entropy cost in bits. 
# Switch to nats by commenting out this line:
cost = cost * lib.floatX(1.44269504089)

ip_cost = lib.floatX(1.44269504089) * T.nnet.categorical_crossentropy(
    T.nnet.softmax(big_frame_independent_preds.reshape((-1, Q_LEVELS))),
    target_sequences.flatten()
).mean()

all_params = lib.search(cost, lambda x: hasattr(x, 'param'))
ip_params = lib.search(ip_cost, lambda x: hasattr(x, 'param') and 'BigFrameLevel' in x.name)
other_params = [p for p in all_params if p not in ip_params]
all_params = ip_params + other_params
lib._train.print_params_info(ip_cost, ip_params)
lib._train.print_params_info(cost, other_params)
lib._train.print_params_info(cost, all_params)

ip_grads = T.grad(ip_cost, wrt=ip_params, disconnected_inputs='warn')
ip_grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in ip_grads]

other_grads = T.grad(cost, wrt=other_params, disconnected_inputs='warn')
other_grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in other_grads]

grads = T.grad(cost, wrt=all_params, disconnected_inputs='warn')
grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads]
Example #14
disc_out = Discriminator(T.concatenate([real_data, fake_data], axis=0))
disc_real = disc_out[:BATCH_SIZE]
disc_fake = disc_out[BATCH_SIZE:]

gen_cost = -T.mean(Discriminator(Generator(2 * BATCH_SIZE)))
disc_cost = T.mean(disc_fake) - T.mean(disc_real)

alpha = srng.uniform(size=(BATCH_SIZE, 1), low=0., high=1.)
differences = fake_data - real_data
interpolates = real_data + (alpha * differences)
gradients = T.grad(T.sum(Discriminator(interpolates)), interpolates)
slopes = T.sqrt(T.sum(T.sqr(gradients), axis=1))
lipschitz_penalty = T.mean((slopes - 1.)**2)
disc_cost += 10 * lipschitz_penalty
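# (This is the WGAN-GP penalty: the critic's gradient norm is pushed toward 1
# along random interpolates between real and fake samples.)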

gen_params = lib.search(
    gen_cost, lambda x: hasattr(x, 'param') and 'Generator' in x.name)
discrim_params = lib.search(
    disc_cost, lambda x: hasattr(x, 'param') and 'Discriminator' in x.name)

gen_grads = T.grad(gen_cost, gen_params)
discrim_grads = T.grad(disc_cost, discrim_params)

# Just so logging code doesn't break
gen_grad_norm, discrim_grad_norm = T.as_tensor_variable(
    np.float32(0)), T.as_tensor_variable(np.float32(0))

gen_grads, gen_grad_norm = lasagne.updates.total_norm_constraint(
    gen_grads, 50.0, return_norm=True)
discrim_grads, discrim_grad_norm = lasagne.updates.total_norm_constraint(
    discrim_grads, 50.0, return_norm=True)
Example #15
File: sin_lstm.py  Project: igul222/nn
gen_cost = -T.mean(Discriminator(fake_data_4x))
disc_cost = T.mean(disc_fake) - T.mean(disc_real)

alpha = srng.uniform(
    size=(BATCH_SIZE,1), 
    low=0.,
    high=1.
)
differences = fake_data - real_data
interpolates = real_data + (alpha*differences)
gradients = T.grad(T.sum(Discriminator(interpolates)), interpolates)
slopes = T.sqrt(T.sum(T.sqr(gradients), axis=1))
lipschitz_penalty = T.mean((slopes-1.)**2)
disc_cost += 10*lipschitz_penalty

gen_params     = lib.search(gen_cost,     lambda x: hasattr(x, 'param') and 'Generator' in x.name)
discrim_params = lib.search(disc_cost, lambda x: hasattr(x, 'param') and 'Discriminator' in x.name)

gen_grads       = T.grad(gen_cost, gen_params)
discrim_grads   = T.grad(disc_cost, discrim_params)
gen_grads = [
    T.clip(g, lib.floatX(-1.0), lib.floatX(1.0))
    for g in gen_grads
]
discrim_grads = [
    T.clip(g, lib.floatX(-1.0), lib.floatX(1.0))
    for g in discrim_grads
]
gen_updates     = lasagne.updates.adam(gen_grads,     gen_params,     learning_rate=1e-4, beta1=0.5, beta2=0.9)
discrim_updates = lasagne.updates.adam(discrim_grads, discrim_params, learning_rate=1e-4, beta1=0.5, beta2=0.9)
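
A minimal sketch of the alternating update schedule these two update dictionaries feed; the compiled functions, the symbolic real_data input, and next_batch() are assumptions not shown in the excerpt:

train_discrim_fn = theano.function([real_data], disc_cost, updates=discrim_updates)
train_gen_fn = theano.function([], gen_cost, updates=gen_updates)  # noise is drawn in-graph

N_CRITIC = 5  # critic steps per generator step, as in the WGAN papers
for iteration in range(10000):
    for _ in range(N_CRITIC):
        d_cost = train_discrim_fn(next_batch())
    g_cost = train_gen_fn()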
Example #16
#prev_samples = sequences[:, :-1]
#prev_samples = prev_samples.reshape((1, BATCH_SIZE, 1, -1))
#prev_samples = T.nnet.neighbours.images2neibs(prev_samples, (1, FRAME_SIZE), neib_step=(1, 1), mode='valid')
#prev_samples = prev_samples.reshape((BATCH_SIZE,SEQ_LEN, FRAME_SIZE))

encoder_outputs, new_h0 = encoder(input_sequences, h0, reset)

#decoder_outputs = decoder(encoder_outputs,prev_samples)

cost = T.nnet.categorical_crossentropy(T.nnet.softmax(encoder_outputs),
                                       target_sequences.flatten()).mean()

cost = cost * lib.floatX(1.44269504089)

params = lib.search(cost, lambda x: hasattr(x, 'param'))

lib.print_params_info(cost, params)

grads = T.grad(cost, wrt=params, disconnected_inputs='warn')

grads = [
    T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads
]

print "Gradients Computed"

updates = lasagne.updates.adam(grads, params, learning_rate=lr)

train_fn = theano.function([sequences, h0, reset, lr], [cost, new_h0],
                           updates=updates,
                           on_unused_input='warn')
Example #17
File: default.py  Project: toall1985/origin
    pyramid.ytdl_download(url, name, "audio")
elif mode == 1125:
    from lib.pyramid import pyramid

    pyramid.search(url)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1126:
    name = name.split(":")
    from lib.pyramid import pyramid

    pyramid.search(url, search_term=name[1])
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1127:
    from lib.pyramid import pyramid

    pulsarIMDB = pyramid.search(url)
    xbmc.Player().play(pulsarIMDB)
elif mode == 1130:
    from lib.pyramid import pyramid

    pyramid.GetSublinks(name, url, iconimage, fanart)
elif mode == 1140:
    from lib.pyramid import pyramid

    pyramid.SearchChannels()
    pyramid.SetViewThumbnail()
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode == 1141:
    from lib.pyramid import pyramid

    pyramid.Search_input(url)