Example #1
def synthesize(content, style,
        iterations=1000, learning_rate=1e0,
        content_weight=5, style_weight=1e2, smooth_weight=1e2):

    # XXX just to make faster on my machine
    content_im = imresize(content, [256, 256])
    style_im = imresize(style, [256, 256])
    content_rep = get_content_rep(content_im)
    style_rep = get_style_rep(style_im)

    # optimizing the image
    shape = (1,) + content_im.shape
    with tf.Graph().as_default():
        image = tf.Variable(tf.random_normal(shape) * 0.2)
        net, channel_avg = get_network(image)
 
        cont_loss = content_loss_op(net, content_rep, content_weight)
        style_loss = style_loss_op(net, style_rep, style_weight)
        smooth_loss = smoothing_loss_op(image, smooth_weight)
        total_loss = 2 * (cont_loss + style_loss + smooth_loss)

        train_step = tf.train.AdamOptimizer(learning_rate).\
                                minimize(total_loss)

        im_out = None
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(iterations):
                print('Iteration', i)
                train_step.run()
            im_out = image.eval().squeeze() + channel_avg
    return im_out
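A minimal driver sketch for the function above. The imageio calls and the file names are illustrative assumptions; the original project's loading code is not shown in this listing.

# Hypothetical usage of synthesize(); imageio and the file names
# are assumptions, not part of the original snippet.
import imageio.v2 as imageio

content = imageio.imread('content.jpg').astype('float32')
style = imageio.imread('style.jpg').astype('float32')
result = synthesize(content, style, iterations=500)
imageio.imwrite('stylized.jpg', result.clip(0, 255).astype('uint8'))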
Example #2
def train(config: Config, outfile):
    """
    Run the training loop.
    This means doing a number of self-play games,
    saving the states of those games,
    and then running a few batches of gradient descent on the network.
    """
    model = network.get_network(True, config)
    replays = ReplayBuffer(config, outfile)

    def replays_generator():
        while True:
            yield replays.sample_batch()

    # Make a directory to save checkpoints in
    os.makedirs(config.checkpoint_dir, exist_ok=True)

    # Single threaded version.
    for i in range(config.training_steps // config.batches_per_step):
        # Save checkpoint
        if (i + 1) % config.checkpoint_interval == 0:
            model.save(
                os.path.join(config.checkpoint_dir, config.checkpoint_fname))

        # Self-play some games
        progbar = keras.utils.Progbar(config.games_per_step)
        progbar.update(0)
        for g in range(config.games_per_step):
            replays.save_game(self_play(model, config))
            progbar.update(g + 1)
        # Might want to create a better training loop than this
        model.fit(replays_generator(),
                  steps_per_epoch=config.batches_per_step,
                  initial_epoch=i,
                  epochs=i + 1)
Example #3
def get_networks(project_id,
                 include_resources=True,
                 summary=False,
                 include_data='N',
                 **kwargs):
    """
        Get all networks in a project
        Returns an array of network objects.
    """
    log.info("Getting networks for project %s", project_id)
    user_id = kwargs.get('user_id')
    project = _get_project(project_id)
    project.check_read_permission(user_id)

    rs = DBSession.query(
        Network.network_id,
        Network.status).filter(Network.project_id == project_id).all()
    networks = []
    for r in rs:
        if r.status != 'A':
            continue
        try:
            net = network.get_network(r.network_id,
                                      include_resources=include_resources,
                                      summary=summary,
                                      include_data=include_data,
                                      **kwargs)
            log.info("Network %s retrieved", net.network_name)
            networks.append(net)
        except PermissionError:
            log.info("Not returning network %s as user %s does not have "
                     "permission to read it." % (r.network_id, user_id))

    return networks
Example #4
def compute_model_f1_score(dataset, network, K, seed):
    random.seed(seed)

    # get the incidence matrix
    S = get_network(dataset, network)
    hyedges_count = S.shape[1]

    # get missing hyedges list used for K-fold cross validation
    missing_hyedges_indices_list = get_missing_hyedges_indices(
        hyedges_count, K)

    f1_score_list = []
    for k in range(K):
        print("round - " + str(k))

        missing_hyedges_indices = missing_hyedges_indices_list[k]
        existing_hyedges_indices = np.sort(
            list(set(list(range(S.shape[1]))) - set(missing_hyedges_indices)))

        print("getting incidence matrix")
        H = S[:, existing_hyedges_indices]

        # checking whether the missing hyedges contain any singleton nodes
        print("validating missing hyedges")
        missing_hyedges = get_hyedges_from_indices(S, missing_hyedges_indices)
        valid_missing_hyedges, valid_missing_hyedges_indices = validate_hyedges(
            missing_hyedges, missing_hyedges_indices, H)

        print("getting pairwise scores")
        hra_scores = get_hra_scores(H)

        print("getting the hyedges degree distribution")
        hyedges_degree, hyedges_degree_frequencies = get_hyedges_degree_dist(H)

        print("predicting missing hyedges")
        predicted_hyedges = []
        for j in range(len(valid_missing_hyedges)):
            sample_hyedge_degree = np.random.choice(
                hyedges_degree, replace=True, p=hyedges_degree_frequencies)
            predicted_hyedges.append(
                predict_hyperedge(H, hra_scores, sample_hyedge_degree))

        print("computing f1 score")
        f1_score = compute_avg_f1_score(valid_missing_hyedges,
                                        predicted_hyedges)

        f1_score_list.append(f1_score)
        print("f1 score - " + str(f1_score))

    print("\n")
    print("EVALUATION DETAILS ")

    print("HPRA MODEL RESULTS ")
    print("f1 score list - ")
    print(f1_score_list)

    print("average f1 score - " +
          str(np.sum(f1_score_list) / len(f1_score_list)))
    print("std - f1 score - " + str(np.std(f1_score_list, dtype=np.float64)))
Example #5
    def __init__(self, cfg):
        self.reader = Reader(cfg)
        self.m = get_network(cfg, self.reader.vocab)
        self.EV = Evaluator  # evaluator class
        if cfg.cuda:
            self.m = self.m.cuda()
        self.base_epoch = -1
        self.cfg = cfg
Example #6
def main(cfg):

    if cfg['mode'] == 'train':
        train_dataset = get_dataset(mode=cfg['mode'], cfg=cfg)
        val_dataset = get_dataset(mode='val', cfg=cfg)
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=cfg['train']['batch_size'],
            num_workers=8,
            shuffle=True,
            collate_fn=collate_remove_none,
            worker_init_fn=worker_init_fn)
        val_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=cfg['val']['batch_size'],
            num_workers=8,
            shuffle=False,
            collate_fn=collate_remove_none,
            worker_init_fn=worker_init_fn)
        model = get_network(cfg, device='cuda:0', dataset=train_dataset)
    else:
        test_dataset = get_dataset(mode=cfg['mode'], cfg=cfg, return_idx=True)
        test_loader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=1,
                                                  num_workers=4,
                                                  shuffle=False)
        model = get_network(cfg, device='cuda:0', dataset=test_dataset)

    if cfg['mode'] == 'train':
        optimizer = optim.Adam(model.parameters(), lr=1e-4)
    else:
        optimizer = None

    if cfg['mode'] == 'train':
        checkpoint = CheckpointIO(cfg['out']['checkpoint_dir'],
                                  model=model,
                                  optimizer=optimizer)
        load_dict = checkpoint.load(cfg['train']['pretrained'])
        train(train_loader, val_loader, model, optimizer, checkpoint, cfg)
    else:
        checkpoint = CheckpointIO(cfg['out']['checkpoint_dir'], model=model)
        load_dict = checkpoint.load(cfg['test']['pretrained'])
        test(test_loader, test_dataset, model, cfg)
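The keys read from cfg above imply roughly this shape of configuration; the values below are placeholders, not the project's defaults:

# Placeholder config inferred from the keys accessed in main();
# all values here are illustrative assumptions.
cfg = {
    'mode': 'train',
    'train': {'batch_size': 32, 'pretrained': ''},
    'val': {'batch_size': 8},
    'test': {'pretrained': 'model_best.pt'},
    'out': {'checkpoint_dir': 'out/checkpoints'},
}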
Example #7
def get_content_rep(content_im):
    nrows, ncols, nchannels = content_im.shape
    content_rep = {}
    g = tf.Graph()
    with g.as_default(), tf.Session() as sess:
        image = tf.placeholder('float',
                               shape=(1, nrows, ncols, nchannels))
        net, channel_avg = get_network(image)
        # get network responses for our content image
        # output size is 1 x nrows x ncols x nfilters
        content_rep[CONTENT_LAYER] = net[CONTENT_LAYER].eval(
            feed_dict={image: [content_im - channel_avg]})
    return content_rep
Example #8
def train():
    batch_producer = producer(proj_path, cfg)

    train_net = get_network('train', cfg)

    with tf.Session(config=make_TFconfig()) as sess:
        sw = TrainWrapper(proj_path, cfg, train_net)
        # sw = TrainWrapper(proj_path, cfg, )
        sw.train_model(sess=sess,
                       cfg=cfg,
                       producer=batch_producer.producer,
                       max_iters=cfg.TRAIN.MAX_ITER,
                       restore=False)
Example #9
def main():
    parser = argparse.ArgumentParser(description="Splitting parameters")
    parser.add_argument('voice_list_dir',
                        type=str,
                        metavar='v',
                        help="recording list dir")
    parser.add_argument('save_dir',
                        type=str,
                        metavar='s',
                        help="file save directory")
    args = parser.parse_args()

    print('Loading Voice Sample...')
    print("performing filters/mfcc")
    vad_obj = webrtcvad.Vad(2)
    mfc_obj = MFCC(nfilt=64,
                   lowerf=20.,
                   upperf=7200.,
                   samprate=16000,
                   nfft=1024,
                   wlen=0.025)

    print('Initializing networks...')
    e_net, e_optimizer = get_network('e', NETWORKS_PARAMETERS, train=False)

    with open(args.voice_list_dir) as file_list:
        line = file_list.readline()
        line = line.rstrip()
        i = 1
        while line:
            print('making embedding for {}'.format(line))
            embedding = get_embedding(e_net, line, vad_obj, mfc_obj)
            i += 1
            stuff = line.split("/")
            output_name = "{}_{}".format(stuff[6], stuff[7])
            output_name = output_name.replace(".wav", "")
            output_name = output_name + ".npy"
            save_dir = os.path.join(args.save_dir, output_name)
            print('Saving embedding to: {}'.format(save_dir))
            np.save(save_dir,
                    embedding.cpu().detach().numpy(),
                    allow_pickle=True)

            line = file_list.readline()
            line = line.rstrip()
    print("complete")
Example #10
def get_style_rep(style_im):
    nrows, ncols, nchannels = style_im.shape
    style_rep = {}
    g = tf.Graph()
    with g.as_default(), tf.Session() as sess:
        image = tf.placeholder('float',
                               shape=(1, nrows, ncols, nchannels))
        net, channel_avg = get_network(image)
        # get network responses of style image
        for layer in STYLE_LAYERS:
            # output size is 1 x nrows x ncols x nfilters
            responses = net[layer].eval(
                feed_dict={image: [style_im - channel_avg]})
            # compute cross correlations between filter responses
            nfilters = responses.shape[-1]
            responses = responses.reshape((-1, nfilters))
            gram = responses.T.dot(responses)
            style_rep[layer] = gram / responses.size
    return style_rep
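The Gram-matrix step above is the standard style representation; a minimal NumPy check of the same computation on a fake response tensor (the sizes are arbitrary assumptions):

import numpy as np

# Fake layer responses shaped 1 x nrows x ncols x nfilters,
# mirroring the snippet above.
responses = np.random.rand(1, 8, 8, 16).astype('float32')
nfilters = responses.shape[-1]
flat = responses.reshape((-1, nfilters))   # (nrows*ncols) x nfilters
gram = flat.T.dot(flat) / flat.size        # normalized cross-correlations
assert gram.shape == (nfilters, nfilters)
assert np.allclose(gram, gram.T)           # Gram matrices are symmetric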
Example #11
def main():

    if len(sys.argv) != 3:
        print("USAGE: python keypoint_detector.py"
              " <training_file> <testing_file>")
        sys.exit(-1)

    # build the network
    net = network.get_network()

    X, y = load_data(sys.argv[1], sys.argv[2])
    X = X.reshape(-1, 1, 96, 96)
    net.fit(X, y)

    # save model
    net.save_params_to('model_params')
    net.save_weights_to('model_weights')

    with open('net.pickle', 'wb') as f:
        pickle.dump(net, f, -1)

    return
Example #12
    def __init__(self, config, word_dict=None, classes=None, ckpt=None):
        self.config = config
        self.device = config.device
        self.start_epoch = 0

        if ckpt:
            self.config.run_name = ckpt['run_name']
            self.word_dict = ckpt['word_dict']
            self.classes = ckpt['classes']
            self.best_metric = ckpt['best_metric']
            self.start_epoch = ckpt['epoch']
        else:
            self.word_dict = word_dict
            self.classes = classes
            self.start_epoch = 0
            self.best_metric = 0

            # load embedding
            if os.path.exists(config.embed_file):
                log.info(f'Load pretrained embedding from file: {config.embed_file}.')
                embedding_weights = data_utils.get_embedding_weights_from_file(
                    self.word_dict, config.embed_file)
                self.word_dict.set_vectors(self.word_dict.stoi, embedding_weights,
                                           dim=embedding_weights.shape[1],
                                           unk_init=False)
            elif not config.embed_file.isdigit():
                log.info('Load pretrained embedding from torchtext.')
                self.word_dict.load_vectors(config.embed_file)
            else:
                raise NotImplementedError
        self.config.num_class = len(self.classes)

        embed_vecs = self.word_dict.vectors
        self.network = get_network(config, embed_vecs).to(self.device)
        self.init_optimizer()

        if ckpt:
            self.network.load_state_dict(ckpt['state_dict'])
            self.optimizer.load_state_dict(ckpt['optimizer'])
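The saving side is not shown in this listing; a hedged sketch of a writer that produces the keys the ckpt branch above reads back (the function name and the torch.save usage are assumptions inferred from the keys read here):

import torch

def save_checkpoint(trainer, path='checkpoint.pt'):
    # Hypothetical writer producing the keys that __init__ above
    # reads from ckpt; trainer stands for an instance of the class above.
    torch.save({
        'run_name': trainer.config.run_name,
        'word_dict': trainer.word_dict,
        'classes': trainer.classes,
        'best_metric': trainer.best_metric,
        'epoch': trainer.start_epoch,
        'state_dict': trainer.network.state_dict(),
        'optimizer': trainer.optimizer.state_dict(),
    }, path)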
Example #13
def get_networks(project_id, include_data='N', **kwargs):
    """
        Get all networks in a project
        Returns an array of network objects.
    """
    log.info("Getting networks for project %s", project_id)
    user_id = kwargs.get('user_id')
    project = _get_project(project_id)
    project.check_read_permission(user_id)

    rs = DBSession.query(
        Network.network_id,
        Network.status).filter(Network.project_id == project_id).all()
    networks = []
    for r in rs:
        if r.status != 'A':
            continue
        try:
            net = network.get_network(r.network_id, summary=True,
                                      include_data=include_data, **kwargs)
            log.info("Network %s retrieved", net.network_name)
            networks.append(net)
        except PermissionError:
            log.info("Not returning network %s as user %s does not have "
                     "permission to read it.", r.network_id, user_id)

    return networks
Example #14
def main():
    config = read_config()
    c = conn(config, method='post')
    args = docopt(__doc__, version='%s %s' % (__cli_name__, __version__))
    res = False
    res_get = False
    if args['deploy']:
        try:
            res = deploy_vm(c, args)
        except Exception as e:
            print("Error deploying instance: %s" % e)
            sys.exit(1)
    elif args['destroy']:
        try:
            id = c.list_virtualmachines(name=args['INSTANCE'])
            if len(id) == 1:
                res = destroy_vm(c, id[0].id)
            else:
                # Multiple VMs returned
                if args['--network']:
                    id = c.list_virtualmachines(
                        name=args['INSTANCE'],
                        networkid=get_network(c, args['--network'])[0].id)
                    res = destroy_vm(c, id[0].id)
                else:
                    print("Multiple instances with name: %s found, "
                          "please supply a network name" % args['INSTANCE'])
        except Exception as e:
            print("Error destroying instance: %s" % e)
    elif args['stop']:
        try:
            id = c.list_virtualmachines(name=args['INSTANCE'])
            if len(id) == 1:
                res = stop_vm(c, id[0].id)
            else:
                # Multiple VMs returned
                if args['--network']:
                    id = c.list_virtualmachines(
                        name=args['INSTANCE'],
                        networkid=get_network(c, args['--network'])[0].id)
                    res = stop_vm(c, id[0].id)
                else:
                    print("Multiple instances with name: %s found, "
                          "please supply a network name" % args['INSTANCE'])
        except Exception as e:
            print("Error stopping instance: %s" % e)
    elif args['start']:
        try:
            id = c.list_virtualmachines(name=args['INSTANCE'])
            if len(id) == 1:
                res = start_vm(c, id[0].id)
            else:
                # Multiple VMs returned
                if args['--network']:
                    id = c.list_virtualmachines(
                        name=args['INSTANCE'],
                        networkid=get_network(c, args['--network'])[0].id)
                    res = start_vm(c, id[0].id)
                else:
                    print("Multiple instances with name: %s found, "
                          "please supply a network name" % args['INSTANCE'])
        except Exception as e:
            print("Error starting instance: %s" % e)
    elif args['list']:
        res = list_vms(c, args)
    elif args['get']:
        res = c.list_virtualmachines(name=args['INSTANCE'])
    else:
        print("Unable to execute command")
        sys.exit(1)

    if res:
        print_tabulate(res, noheader=args['--noheader'], short=args['--short'])
    else:
        print("No virtual machines found, deploy new machines using "
              "`rbc-instances deploy`")
        sys.exit(1)
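The destroy/stop/start branches repeat the same name-to-id lookup; a small refactoring sketch (not part of the original CLI) that factors it out:

def resolve_vm_id(c, args):
    # Hypothetical helper, not in the original project: resolve an
    # instance name to a single VM id, using --network to disambiguate.
    vms = c.list_virtualmachines(name=args['INSTANCE'])
    if len(vms) == 1:
        return vms[0].id
    if args['--network']:
        networkid = get_network(c, args['--network'])[0].id
        return c.list_virtualmachines(name=args['INSTANCE'],
                                      networkid=networkid)[0].id
    raise ValueError("Multiple instances with name: %s found, "
                     "please supply a network name" % args['INSTANCE'])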
Example #15
                          drop_last=True,
                          batch_size=DATASET_PARAMETERS['batch_size'],
                          num_workers=DATASET_PARAMETERS['workers_num'],
                          collate_fn=collate_fn)
face_loader = DataLoader(face_dataset,
                         shuffle=True,
                         drop_last=True,
                         batch_size=DATASET_PARAMETERS['batch_size'],
                         num_workers=DATASET_PARAMETERS['workers_num'])

voice_iterator = iter(cycle(voice_loader))
face_iterator = iter(cycle(face_loader))

# networks, Fe, Fg, Fd (f+d), Fc (f+c)
print('Initializing networks...')
e_net, e_optimizer = get_network('e', NETWORKS_PARAMETERS, train=False)
g_net, g_optimizer = get_network('g', NETWORKS_PARAMETERS, train=True)
f_net, f_optimizer = get_network('f', NETWORKS_PARAMETERS, train=True)
d_net, d_optimizer = get_network('d', NETWORKS_PARAMETERS, train=True)
c_net, c_optimizer = get_network('c', NETWORKS_PARAMETERS, train=True)

# label for real/fake faces
real_label = torch.full((DATASET_PARAMETERS['batch_size'], 1), 1)
fake_label = torch.full((DATASET_PARAMETERS['batch_size'], 1), 0)

# Meters for recording the training status
iteration = Meter('Iter', 'sum', ':5d')
data_time = Meter('Data', 'sum', ':4.2f')
batch_time = Meter('Time', 'sum', ':4.2f')
D_real = Meter('D_real', 'avg', ':3.2f')
D_fake = Meter('D_fake', 'avg', ':3.2f')
Example #16
# This script is for testing.
import os
import glob
import torch
import scipy.io as sio
import numpy as np
import cv2

from config import NETWORKS_PARAMETERS
from network import get_network, IGM
from utils import voice2face_processed
from utilf.render import render_vert

# initialization
e_net, _ = get_network('e', NETWORKS_PARAMETERS, train=False)
g_net, _ = get_network('g', NETWORKS_PARAMETERS, train=False)

# building models: Voice2Mesh unsupervised
image3D = IGM(pretrained=False, last_CN=None).cuda().eval()
backbone_ckpt = torch.load(NETWORKS_PARAMETERS['image3D']['model_path'])
image3D.load_state_dict(backbone_ckpt)

# 3DDFA-V2 pretrained network for getting pose
image3D_pretrained = IGM(pretrained=True).cuda().eval()

# data and config
voice_list = sorted(glob.glob('data/preprocessed_MFCC/*'))
up_layer = torch.nn.Upsample((120, 120), mode='bilinear', align_corners=True)
tri = sio.loadmat('./train.configs/tri.mat')['tri']

# [TODO] Change this variable to your result output folder
Example #17
(tokenizer, data_input, data_labels, max_input_length,
 label_classes_to_index) = load_dataset.preprocess_dataset(lyrics)
index_to_label_class = {v: k for k, v in label_classes_to_index.items()}

(embeddings_words, embeddings_vec_size) = load_dataset.load_embeddings()
(embeddings_matrix,
 idx_to_word_map) = load_dataset.glove_to_matrix(embeddings_words, tokenizer)

vocab_size = len(tokenizer.word_index) + 1
print("Vocabulary Size:", vocab_size)
print("Inputs Shape:", data_input.shape)
print("Labels Shape:", data_labels.shape)
print("Classes are:")
print(label_classes_to_index.keys())

model = network.get_network(max_input_length, data_labels.shape[1],
                            embeddings_matrix, tokenizer, embeddings_vec_size)
model.summary()

name = "100_cells"

callbacks = [
    ks.callbacks.TensorBoard("./logs/{}".format(name),
                             write_graph=True,
                             write_grads=False,
                             write_images=False),
    ks.callbacks.ModelCheckpoint("./models/{}".format(name + ".dat"),
                                 save_best_only=True)
]

model.fit(data_input,
          data_labels,
Example #18
from api import messages_get_history
from api_models import Message
from network import get_network
from network import plot_graph
from api import get_friends
from messages import count_dates_from_messages, plotly_messages_freq
'''messages = messages_get_history(175239807, offset=10, count=50)
messages_list = [Message(**mes) for mes in messages]
dates_freqs = count_dates_from_messages(messages_list)
plotly_messages_freq(dates_freqs[0], dates_freqs[1])'''

friends = get_friends(141948816, fields='bdate')
ids = []
names = []
for friend in friends:
    ids.append(friend['uid'])
    names.append(friend['first_name'] + ' ' + friend['last_name'])
edges = get_network(ids)
plot_graph(edges, names)
Example #19

def get_session():
    gpu_options = tf.GPUOptions(allow_growth=True)
    return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))


ktf.set_session(get_session())

np.random.seed(0)

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description="Train and visualize model")
    parser.add_argument("mode")
    parser.add_argument("-model", default=None)
    parser.add_argument("-seq", default=0)
    args, leftovers = parser.parse_known_args()
    model = get_network()

    if args.mode == 'train':
        print("Training...")
        loader = Loader()
        train(model, loader.train_gen, loader.valid_gen)
        visualize(model, 0)

    if args.mode == 'test':
        print("Loading", str(args.model))
        model = load_model(str(args.model))
        visualize(model, int(args.seq))
Example #20
def deploy_vm(c, a):
    deploy_args = {}
    if a['--number']:
        n = int(a['--number'])
    else:
        n = 1
    if a['INSTANCE']:
        deploy_args['name'] = a['INSTANCE']
        deploy_args['displayname'] = a['INSTANCE']
    if a['--group']:
        deploy_args['group'] = a['--group']
    if a['--template']:
        try:
            template = get_template(c, a['--template'])[0]
            deploy_args['templateid'] = template.id
        except:
            print "Unable to get templateid for %s" % a['--template']
            sys.exit(1)
    if a['--offering']:
        try:
            offering = get_offering(c, a['--offering'])[0]
            deploy_args['serviceofferingid'] = offering.id
        except:
            print "Unable to get offeringid for %s" % a['--offering']
            sys.exit(1)
    if a['--network']:
        try:
            network = get_network(c, a['--network'])[0]
            deploy_args['networkids'] = network.id
        except:
            print "Unable to get networkid for %s" % a['--network']
            sys.exit(1)
    if a['--zone']:
        zone = get_zone(c, a['--zone'])[0]
        deploy_args['zoneid'] = zone.id
    else:
        zone = get_zone(c)[0]
        deploy_args['zoneid'] = zone.id
    if a['--sshkey']:
        try:
            sshkey = get_sshkey(c, a['--sshkey'])[0]
            deploy_args['keypair'] = sshkey.name
        except:
            print "Unable to get ssh key: %s" % a['--sshkey']
            sys.exit(1)
    if a['--user-data']:
        deploy_args['userdata'] = base64.b64encode(a['--user-data'])
    if a['--user-data-file']:
        if os.path.isfile(a['--user-data-file']):
            with open(a['--user-data-file']) as f:
                encoded = base64.b64encode(f.read())
            size = sys.getsizeof(encoded)
            deploy_args['userdata'] = encoded
    # Deploy it!
    res = []
    for i in range(n):
        if n > 1:
            append_hash = hashlib.new('sha1', str(random.randint(
                0, 1000000))).hexdigest()[:3]
            deploy_args['name'] = "%s-%s" % (a['INSTANCE'], append_hash)
            deploy_args['displayname'] = "%s-%s" % (a['INSTANCE'], append_hash)
        res.append(c.deploy_virtualmachine(**deploy_args))
    vms = []
    if a['--tags']:
        tags = {}
        for tag in a['--tags'].split(','):
            regex = re.compile(r"\b(\w+)\s*:\s*([^:]*)(?=\s+\w+\s*:|$)")
            tags.update(dict(regex.findall(tag)))
    if not a['--nowait']:
        for j in res:
            if a['--tags']:
                c.create_tags(resourceids=j.id,
                              resourcetype='UserVm',
                              tags=tags)
            vms.append(j.get_result())
    else:
        for j in res:
            if a['--tags']:
                c.create_tags(resourceids=j.id,
                              resourcetype='UserVm',
                              tags=tags)
            vms.append(c.list_virtualmachines(id=j.id)[0])
    return vms
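The --tags parsing above relies on a lookahead regex; a standalone demonstration of what it extracts (the sample string is made up):

import re

# The same pattern as in deploy_vm above, run on a made-up --tags value.
regex = re.compile(r"\b(\w+)\s*:\s*([^:]*)(?=\s+\w+\s*:|$)")
tags = {}
for tag in "env: prod owner: alice".split(','):
    tags.update(dict(regex.findall(tag)))
print(tags)  # {'env': 'prod', 'owner': 'alice'}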
Example #21
                             horizontal_flip=True,
                             fill_mode='nearest')

    print('compiling model...')
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print('training network...')
    H = model.fit_generator(aug.flow(X_train, y_train, batch_size=BATCH_SIZE),
                            validation_data=(X_test, y_test),
                            steps_per_epoch=len(X_train) // BATCH_SIZE,
                            epochs=EPOCHS,
                            verbose=1)

    model.save('./first_model.h5')
    with open('./label_bin', 'wb') as f:
        f.write(pickle.dumps(lb))
    plot_curve(H)


if __name__ == "__main__":
    net = get_network(img_w, img_h, img_d, classes)
    model = net()
    print(model.summary())
    imagePaths = read_image_paths()
    data, labels = load_dataset(imagePaths)
    start_training(data, labels, model)
Example #22
def deploy_vm(c, a):
    deploy_args = {}
    if a['--number']:
        n = int(a['--number'])
    else:
        n = 1
    if a['INSTANCE']:
        deploy_args['name'] = a['INSTANCE']
        deploy_args['displayname'] = a['INSTANCE']
    if a['--group']:
        deploy_args['group'] = a['--group']
    if a['--template']:
        try:
            template = get_template(c, a['--template'])[0]
            deploy_args['templateid'] = template.id
        except:
            print "Unable to get templateid for %s" % a['--template']
            sys.exit(1)
    if a['--offering']:
        try:
            offering = get_offering(c, a['--offering'])[0]
            deploy_args['serviceofferingid'] = offering.id
        except:
            print "Unable to get offeringid for %s" % a['--offering']
            sys.exit(1)
    if a['--network']:
        try:
            network = get_network(c, a['--network'])[0]
            deploy_args['networkids'] = network.id
        except:
            print "Unable to get networkid for %s" % a['--network']
            sys.exit(1)
    if a['--zone']:
        zone = get_zone(c, a['--zone'])[0]
        deploy_args['zoneid'] = zone.id
    else:
        zone = get_zone(c)[0]
        deploy_args['zoneid'] = zone.id
    if a['--sshkey']:
        try:
            sshkey = get_sshkey(c, a['--sshkey'])[0]
            deploy_args['keypair'] = sshkey.name
        except:
            print "Unable to get ssh key: %s" % a['--sshkey']
            sys.exit(1)
    if a['--user-data']:
        deploy_args['userdata'] = base64.b64encode(a['--user-data'])
    if a['--user-data-file']:
        if os.path.isfile(a['--user-data-file']):
            with open(a['--user-data-file']) as f:
                encoded = base64.b64encode(f.read())
            size = sys.getsizeof(encoded)
            deploy_args['userdata'] = encoded
    # Deploy it!
    res = []
    for i in range(n):
        if n > 1:
            append_hash = hashlib.new('sha1', str(random.randint(0,1000000))).hexdigest()[:3]
            deploy_args['name'] = "%s-%s" % (a['INSTANCE'], append_hash)
            deploy_args['displayname'] = "%s-%s" % (a['INSTANCE'], append_hash)
        res.append(c.deploy_virtualmachine(**deploy_args))
    vms = []
    if a['--tags']:
        tags = {}
        for tag in a['--tags'].split(','):
            regex = re.compile(r"\b(\w+)\s*:\s*([^:]*)(?=\s+\w+\s*:|$)")
            tags.update(dict(regex.findall(tag)))
    if not a['--nowait']:
        for j in res:
            if a['--tags']:
                c.create_tags(resourceids=j.id, resourcetype='UserVm', tags=tags)
            vms.append(j.get_result())
    else:
        for j in res:
            if a['--tags']:
                c.create_tags(resourceids=j.id, resourcetype='UserVm', tags=tags)
            vms.append(c.list_virtualmachines(id=j.id)[0])
    return vms
Example #23
loaded_img = image.load_img(img_path, target_size=(299, 299))
x = image.img_to_array(loaded_img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
x = x.reshape(299, 299, 3)
Image_x2 = []
Image_x2.append(x)
Image_x2 = np.asarray(Image_x2)
Text_x2 = []
Text_x2.append(df.caption[0])
print("Caption: ", df.caption[0])
images = tf.image.convert_image_dtype(input_image1, tf.float32)
img_emb = module_img(images)
s_emb = embed(input_text)
i_embed, s_embed = network.get_network(img_emb, s_emb)

sess = tf.Session()
sess.run(tf.tables_initializer())
saver = tf.train.Saver(save_relative_paths=True)
saver = tf.train.import_meta_graph('model-5000.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))

sen_embed, img_embed1 = sess.run([s_embed, i_embed],
                                 feed_dict={
                                     input_text: Text_x2,
                                     input_image1: Image_x2
                                 })
print("Distance between First Caption and First Image: ",
      distance.euclidean(sen_embed, img_embed1))
print("Cosine Distance: ", spatial.distance.cosine(sen_embed, img_embed1))
Example #24
    drop_last=True,
    batch_size=DATASET_PARAMETERS['batch_size'],
    num_workers=DATASET_PARAMETERS['workers_num'],  # number of worker processes for data loading
    collate_fn=collate_fn)  # how to collate multiple samples into one batch
face_loader = DataLoader(face_dataset,
                         shuffle=True,
                         drop_last=True,
                         batch_size=DATASET_PARAMETERS['batch_size'],
                         num_workers=DATASET_PARAMETERS['workers_num'])

voice_iterator = iter(cycle(voice_loader))
face_iterator = iter(cycle(face_loader))

print('Initializing networks...')
NETWORKS_PARAMETERS['e']['output_channel'] = id_class_num
e_net, e_optimizer = get_network('e', NETWORKS_PARAMETERS, test=True)  # partial training
NETWORKS_PARAMETERS['g']['input_channel'][1] = emotion_class_num
g_net, g_optimizer = get_network('g', NETWORKS_PARAMETERS, train=True)
NETWORKS_PARAMETERS['d1-condition']['input_channel'][1] = emotion_class_num
d1_net, d1_optimizer = get_network('d0', NETWORKS_PARAMETERS, train=True)
d2_net, d2_optimizer = get_network('d0', NETWORKS_PARAMETERS, train=True)
f1_net, f1_optimizer = get_network('f', NETWORKS_PARAMETERS, train=True)
f2_net, f2_optimizer = get_network('f', NETWORKS_PARAMETERS, train=True)

NETWORKS_PARAMETERS['c']['output_channel'] = id_class_num
c1_net, c1_optimizer = get_network('c', NETWORKS_PARAMETERS, train=True)
NETWORKS_PARAMETERS['c']['output_channel'] = emotion_class_num
c2_net, c2_optimizer = get_network('c', NETWORKS_PARAMETERS, train=True)

# Resume training: load the existing model
if NETWORKS_PARAMETERS['finetune']:
Example #25
def train_semantic_segmentation(features, responses, fold_assignments,
                                savename, network_name='flex_unet',
                                network_kwargs={},
                                save_directory='trained_models',
                                verification_fold=None,
                                internal_window_radius=None, weighted=False,
                                nodata_value=-9999, epoch_batch_size=1,
                                batch_size=10, max_epochs=1000,
                                n_noimprovement_repeats=5,
                                save_continuously=False, verbose=True):
    """ Train a semantic segmentation network. 

    Arguments:
    features - 4d numpy array 
      Training features, ordered as sample,y,x,p, with p as 
      the feature dimension
    responses - 4d numpy array
      Training responses, ordered as sample,y,x,r with the r 
      dimension as a single-band categorical classification
    fold_assignments - 1d numpy array
      sample-specific n_folds assignments
    savename - str
      name to save trained model iterations and training history as
    network_name - str 
      name of the network to deploy

    Keyword Arguments:
    network_kwargs - dict
      Keyword arguments to pass on to the specific network deployed.
    save_directory - str
      Directory that the trained models should be stored in.
    verification_fold - int
      Which fold to use for model validation - if not specified, 
      no verification set is used.
    internal_window_radius - int
      The size of the internal window on which to score the model.
    weighted - bool
      A flag to indicate whether or not to use response weighting.
    nodata_value - float
      Data value to ignore.
    epoch_batch_size - int 
      The number of epochs to train before evaluating/saving.
    batch_size - int 
      The number of samples to train on (keras batch size).
    max_epochs - int 
      The maximum number of epochs to train on.
    n_noimprovement_repeats - int
      The number of epoch batches to continue training without
      performance improvement before stopping early.
    save_continuously - bool
      Boolean to indicate whether or not to save the model weights
      at every epoch.
    verbose - int
      An integer indication of verbosity level.  Possible values:
        0 - print nothing
        1 - print only training info
        2 - print everything

    Returns:
      A trained CNN model.
    """

    window_radius = rint(responses.shape[1] / 2.)
    if internal_window_radius is None:
        internal_window_radius = window_radius

    if not os.path.isdir(save_directory):
        os.mkdir(save_directory)

    savename_base = os.path.join(save_directory, savename)

    if (verification_fold is not None):
        train_set = fold_assignments != verification_fold
        test_set = fold_assignments == verification_fold
    else:
        train_set = np.ones(len(fold_assignments)).astype(bool)

    # assign training and testing X
    train_X = features[train_set, ...]
    if (verification_fold is not None):
        test_X = features[test_set, ...]
    del features

    un_responses = np.unique(responses[np.logical_and(
        responses != nodata_value, ~np.isnan(responses))])

    tY = responses[train_set, :, :].astype(int)
    if (internal_window_radius != window_radius):
        buffer = (window_radius - internal_window_radius)
        tY[:, :buffer, :, :] = nodata_value
        tY[:, -buffer:, :, :] = nodata_value
        tY[:, :, :buffer, :] = nodata_value
        tY[:, :, -buffer:, :] = nodata_value

    if (weighted):
        train_Y = np.ones((tY.shape[0], tY.shape[1], tY.shape[2],
                           len(un_responses) + 1)).astype(float)

        # assign_weights
        response_weights = np.zeros(len(un_responses))
        response_counts = np.zeros(len(un_responses))
        for n in range(0, len(un_responses)):
            response_counts[n] = np.sum(tY == un_responses[n])

        for n in range(0, len(un_responses)):
            response_weights[n] = np.sum(tY != nodata_value) / float(
                response_counts[n])
            train_Y[np.squeeze(tY == un_responses[n]),
                    -1] = response_weights[n]
        train_Y[np.squeeze(tY == nodata_value), -1] = 0
    else:
        train_Y = np.ones((tY.shape[0], tY.shape[1], tY.shape[2],
                           len(un_responses))).astype(float)

    # Assign response values as slice categoricals
    for n in range(0, len(un_responses)):
        train_Y[..., n] = np.squeeze((tY == un_responses[n]).astype(float))

    if (verification_fold is not None):
        tY = responses[test_set, :, :].astype(float)

        if (weighted):
            test_Y = np.ones((tY.shape[0], tY.shape[1], tY.shape[2],
                              len(un_responses) + 1)).astype(float)
            for n in range(0, len(un_responses)):
                test_Y[..., n] = np.squeeze(
                    (tY == un_responses[n]).astype(float))
            for n in range(0, len(un_responses)):
                if (tY.shape[0] != 1):
                    test_Y[np.squeeze(tY == un_responses[n]),
                           -1] = response_weights[n]
                else:
                    test_Y[0, np.squeeze(tY == un_responses[n]),
                           -1] = response_weights[n]
            if (tY.shape[0] != 1):
                test_Y[np.squeeze(tY == nodata_value), -1] = 0
            else:
                test_Y[0, np.squeeze(tY == nodata_value), -1] = 0
        else:
            test_Y = np.ones((tY.shape[0], tY.shape[1], tY.shape[2],
                              len(un_responses))).astype(float)

        for n in range(0, len(un_responses)):
            test_Y[..., n] = np.squeeze((tY == un_responses[n]).astype(float))

    if (weighted):
        n_classes = train_Y.shape[-1] - 1
    else:
        n_classes = train_Y.shape[-1]
    model = network.get_network(network_name, train_X.shape[1:], n_classes,
                                network_kwargs)
    model.compile(loss=network.cropped_categorical_crossentropy(
        train_Y.shape[1], internal_window_radius * 2, weighted=weighted),
                  optimizer='adam')
    if verbose == 2:
        model.summary()

    with open(savename_base + '.json', "w") as json_file:
        json_file.write(model.to_json())

    best_loss = 1e50
    last_best = 0
    best_ind = -1
    validation_loss_history = []
    training_loss_history = []
    training_time = []
    training_epochs = []

    start_time = time.time()
    best_model = keras.models.clone_model(model)
    for n in range(0, max_epochs):
        if (verification_fold is not None):
            output = model.fit(train_X,
                               train_Y,
                               validation_data=(test_X, test_Y),
                               epochs=epoch_batch_size,
                               batch_size=batch_size,
                               verbose=verbose > 0,
                               shuffle=False)

            lvl = output.history['val_loss'][0]
            validation_loss_history.append(lvl)
            training_loss_history.append(output.history['loss'][0])
            training_time.append(time.time() - start_time)
            training_epochs.append(n * epoch_batch_size)
            if (lvl < best_loss * .98):
                best_loss = lvl
                last_best = 0
                best_ind = n
                best_model.set_weights(model.get_weights())
            else:
                last_best += 1
            perm = np.random.permutation(train_X.shape[0])
            train_X = train_X[perm, ...]
            train_Y = train_Y[perm, ...]
        else:
            model.fit(train_X,
                      train_Y,
                      epochs=epoch_batch_size,
                      batch_size=batch_size,
                      verbose=verbose > 0,
                      shuffle=False)

        if (save_continuously):
            model.save_weights(savename_base + '_epoch_' +
                               str(n * epoch_batch_size),
                               overwrite=True)

        if (last_best >= n_noimprovement_repeats):
            break

    best_model.save_weights(savename_base + '_weights', overwrite=True)
    np.savez(savename_base + '.npz',
             training_loss=training_loss_history,
             validation_loss=validation_loss_history,
             training_time=training_time,
             training_epochs=training_epochs)
    return model
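A hedged smoke-test sketch for the trainer above; the shapes, the fold count, and the reliance on the default 'flex_unet' network are illustrative assumptions:

import numpy as np

# Tiny fake dataset: sample, y, x, p features and single-band
# categorical responses, with random 4-fold assignments.
features = np.random.rand(20, 32, 32, 3)
responses = np.random.randint(0, 3, (20, 32, 32, 1)).astype(float)
fold_assignments = np.random.randint(0, 4, 20)

model = train_semantic_segmentation(features, responses, fold_assignments,
                                    'demo_model', verification_fold=0,
                                    max_epochs=2, verbose=1)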
Example #26
from network import get_network, plot_graph
from api import get_friends, get_wall
from age import age_predict
from api_models import normalize

print('test age_predict')
print(age_predict(125483792))

print('test graph')
friends = get_friends(125483792, 'id')['response']
users_ids = []
names = []
k = 0
for friend in friends['items']:
    users_ids.append(friend['id'])
    names.append(friend['first_name'] + ' ' + friend['last_name'])
    k += 1
    if k == 50:
        break
edges = get_network(users_ids, as_edgelist=True)
plot_graph(edges, names)
'''
with open('1.txt', 'w') as f:
    post_list = get_wall(domain='pn6', count=5)
    f.writelines(normalize(post_list))
'''
Example #27
        count += 1

    if opt.split != -1:
        opt.savename += '/split%d' % opt.split

else:
    opt.weights = opt.savename + 'checkpoint.pth.tar'
    opt.savename += '/evaluation/'


if not os.path.exists(opt.savename+'/samples/'):
    os.makedirs(opt.savename+'/samples/')

"""=============================NETWORK SETUP==============================="""
opt.device = torch.device('cuda')
model      = network.get_network(opt)

if opt.weights and opt.weights != "none":
    #model.load_state_dict(torch.load(opt.weights)['state_dict'])
    j = len('module.')
    weights = torch.load(opt.weights)['state_dict']
    model_dict = model.state_dict()
    weights = {k[j:]: v for k, v in weights.items() if k[j:] in model_dict.keys()}
    # if not opt.evaluate:
    #     weights = {k: v for k, v in weights.items() if 'regressor' not in k}
    model_dict.update(weights)
    model.load_state_dict(model_dict)
    print("LOADED MODEL:  ", opt.weights)

model = nn.DataParallel(model)
_ = model.to(opt.device)
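The k[j:] slicing above strips the 'module.' prefix that nn.DataParallel prepends to parameter names when a checkpoint was saved from a wrapped model; a minimal standalone illustration with a toy dict:

# Toy illustration of stripping the 'module.' prefix that
# nn.DataParallel adds to state-dict keys.
j = len('module.')
weights = {'module.conv1.weight': 1, 'module.fc.bias': 2}
print({k[j:]: v for k, v in weights.items()})
# {'conv1.weight': 1, 'fc.bias': 2}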
Example #28
module_img = hub.Module(
    "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1",
    trainable=False)
headers = ["img", "caption"]
df = pd.read_csv('/home/super/datasets/flicker30/results_20130124_orig.token',
                 names=headers,
                 delimiter="\t")
module_url = "https://tfhub.dev/google/universal-sentence-encoder-large/2"
embed = hub.Module(module_url, trainable=True)

session = tf.InteractiveSession()
global_step = tf.Variable(0, trainable=False)
images = tf.image.convert_image_dtype(input_image, tf.float32)
img_emb = module_img(images)
sent_emb = embed(input_text)
i_emb, s_emb = network.get_network(img_emb, sent_emb)
loss = network.embedding_loss(i_emb, s_emb, labels_phl)
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
train_step = optimizer.minimize(loss, global_step=global_step)
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
saver = tf.train.Saver()

for epoch in range(NUM_EPOCH_):  # set total epochs
    print("Epoch: ", epoch)
    for epoch2 in range(2):  # epochs per mini batch
        Image_x = []
        Image_x2 = []
        Text_x = []
        Label_y = []
        j = 0
        if (getBatch):
Example #29
import torch.nn.functional as F
import torch.optim as optim

from dataloader import train_iter, test_iter
from network import get_network
from test import test
from utils import *

m = 4
alpha = 0.001

lr = 2e-4
n_epochs = 100
os.makedirs(save_dir, exist_ok=True)

model = get_network().to(DEVICE)
print_network(model)
trainer = optim.Adam(model.parameters(), lr=lr, betas=[0.5, 0.999])

for epoch in range(n_epochs):
    model.train()
    for b, (pos_1, neg_1, pos_2, neg_2) in enumerate(train_iter):
        pos_1 = pos_1.to(DEVICE)
        neg_1 = neg_1.to(DEVICE)
        pos_2 = pos_2.to(DEVICE)
        neg_2 = neg_2.to(DEVICE)

        # forward pass
        pb_1 = model(pos_1)
        nb_1 = model(neg_1, False)
        pb_2 = model(pos_2)
Example #30
def main():
    config = read_config()
    c = conn(config, method='post')
    args = docopt(__doc__, version='%s %s' % (__cli_name__, __version__))
    res = False
    res_get = False
    if args['deploy']:
        try:
            res = deploy_vm(c, args)
        except Exception as e:
            print "error deploying instance: %s" % e
            sys.exit(1)
    elif args['destroy']:
        try:
            id = c.list_virtualmachines(name=args['INSTANCE'])
            if len(id) == 1:
                res = destroy_vm(c, id[0].id)
            else:
                # Multiple VMs returned
                if args['--network']:
                    id = c.list_virtualmachines(name=args['INSTANCE'],
                                                networkid=get_network(
                                                    c,
                                                    args['--network'])[0].id)
                    res = destroy_vm(c, id[0].id)
                else:
                    print "Multiple instances with name: %s found, please supply a network name" % args[
                        'INSTANCE']
        except Exception as e:
            print "Error destroying instance: %s" % e
    elif args['stop']:
        try:
            id = c.list_virtualmachines(name=args['INSTANCE'])
            if len(id) == 1:
                res = stop_vm(c, id[0].id)
            else:
                # Multiple VMs returned
                if args['--network']:
                    id = c.list_virtualmachines(name=args['INSTANCE'],
                                                networkid=get_network(
                                                    c,
                                                    args['--network'])[0].id)
                    res = stop_vm(c, id[0].id)
                else:
                    print "Multiple instances with name: %s found, please supply a network name" % args[
                        'INSTANCE']
        except Exception as e:
            print "Error stopping instance: %s" % e
    elif args['start']:
        try:
            id = c.list_virtualmachines(name=args['INSTANCE'])
            if len(id) == 1:
                res = start_vm(c, id[0].id)
            else:
                # Multiple VMs returned
                if args['--network']:
                    id = c.list_virtualmachines(name=args['INSTANCE'],
                                                networkid=get_network(
                                                    c,
                                                    args['--network'])[0].id)
                    res = start_vm(c, id[0].id)
                else:
                    print "Multiple instances with name: %s found, please supply a network name" % args[
                        'INSTANCE']
        except Exception as e:
            print "Error starting instance: %s" % e
    elif args['list']:
        res = list_vms(c, args)
    elif args['get']:
        res = c.list_virtualmachines(name=args['INSTANCE'])
    else:
        print "Unable to execute command"
        sys.exit(1)

    if res:
        print_tabulate(res, noheader=args['--noheader'], short=args['--short'])
    else:
        print "No virtual machines found, deploy new machines using `rbc-instances deploy`"
        sys.exit(1)