Example #1
def main(noeval, **args):

    #args should be the info you need to specify the params
    # for a given experiment, but only params should be used below
    params = fill_params(**args)

    utils.set_gpus(params["gpus"])

    net = utils.create_network(**params)
    if not noeval:
        net.eval()

    utils.log_tagged_modules(params["modules_used"], params["log_dir"],
                             params["log_tag"], params["chkpt_num"])

    for dset in params["dsets"]:
        print(dset)

        fs = make_forward_scanner(dset, **params)

        output = forward.forward(net,
                                 fs,
                                 params["scan_spec"],
                                 activation=params["activation"])

        save_output(output, dset, **params)
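
A minimal sketch of the fill_params contract this snippet assumes; fill_params is not shown in the source, so the key names below are inferred from the usage above and the defaults are illustrative only:

# Hypothetical sketch, not from the source: fill_params is assumed to map the
# experiment arguments onto the flat params dict consumed by main() above.
def fill_params(**args):
    return {
        "gpus": args.get("gpus", [0]),            # forwarded to utils.set_gpus
        "modules_used": args.get("modules_used", []),
        "log_dir": args.get("log_dir", "logs"),
        "log_tag": args.get("log_tag", "fwd"),
        "chkpt_num": args.get("chkpt_num", 0),
        "dsets": args.get("dsets", []),           # datasets passed to make_forward_scanner
        "scan_spec": args.get("scan_spec", {}),
        "activation": args.get("activation", None),
    }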
Example #2
def main():
    log.basicConfig(format = '[ %(levelname)s ] %(message)s',
        level = log.INFO, stream = sys.stdout)
    args = build_argparser().parse_args()
    try:
        model_wrapper = openvino_io_model_wrapper()
        data_transformer = transformer()
        io = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        iecore = utils.create_ie_core(args.extension, args.cldnn_config, args.device,
            args.nthreads, None, args.dump, 'sync', log)
        net = utils.create_network(iecore, args.model_xml, args.model_bin, log)
        input_shapes = utils.get_input_shape(model_wrapper, net)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        utils.reshape_input(net, args.batch_size)
        log.info('Prepare input data')
        io.prepare_input(net, args.input)
        log.info('Create executable network')
        exec_net = utils.load_network(iecore, net, args.device, args.priority, 1)
        log.info('Starting inference ({} iterations) on {}'.
            format(args.number_iter, args.device))
        result, time = infer_sync(exec_net, args.number_iter, io.get_slice_input)
        average_time, latency, fps = process_result(time, args.batch_size, args.mininfer)
        if not args.raw_output:
            io.process_output(result, log)
            result_output(average_time, fps, latency, log)
        else:
            raw_result_output(average_time, fps, latency)
        del net
        del exec_net
        del iecore
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
Example #3
def start_training(model_class, model_args, model_kwargs, chkpt_num, lr,
                   train_sets, val_sets, data_dir, **params):

    #PyTorch Model
    net = utils.create_network(model_class, model_args, model_kwargs)
    monitor = utils.LearningMonitor()

    #Loading model checkpoint (if applicable)
    if chkpt_num != 0:
        utils.load_chkpt(net, monitor, chkpt_num, params["model_dir"],
                         params["log_dir"])

    #DataProvider Sampler
    Sampler = params["sampler_class"]
    train_sampler = utils.AsyncSampler(
        Sampler(data_dir,
                dsets=train_sets,
                mode="train",
                resize=params["resize"]))

    val_sampler = utils.AsyncSampler(
        Sampler(data_dir, dsets=val_sets, mode="val", resize=params["resize"]))

    loss_fn = loss.BinomialCrossEntropyWithLogits()
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)

    train.train(net,
                loss_fn,
                optimizer,
                train_sampler,
                val_sampler,
                last_iter=chkpt_num,
                monitor=monitor,
                **params)
Example #4
def main_fwd(noeval, **args):

    #args should be the info you need to specify the params
    # for a given experiment, but only params should be used below
    params = fill_params_fwd(**args)

    utils.set_gpus(params["gpus"])

    net = utils.create_network(**params)
    if not noeval:
        net.eval()

    utils.log_tagged_modules(params["modules_used"], params["log_dir"],
                             params["log_tag"], params["chkpt_num"])

    #lightsheet mods - input folder contains list of our "big" patches
    input_fld = os.path.join(params["data_dir"],
                             "input_patches")  #set directory
    output_fld = os.path.join(params["data_dir"],
                              "cnn_output")  #set patches directory

    if not os.path.exists(output_fld): os.mkdir(output_fld)
    jobid = 0  #for demo only

    #find files that need to be processed
    fls = [os.path.join(input_fld, xx) for xx in os.listdir(input_fld)]
    fls.sort()

    #select the file to process for this batch job
    if jobid >= len(fls):
        #essentially kill the job if jobid is out of range - done in hopes of helping the karma score, though it may not make a difference
        sys.stdout.write("\njobid {} > number of files {}\n".format(
            jobid, len(fls)))
        sys.stdout.flush()
    else:
        dset = fls[jobid]

        start = time.time()

        fs = make_forward_scanner(dset, **params)
        sys.stdout.write("\striding by: {}".format(fs.stride))
        sys.stdout.flush()

        output = forward.forward(
            net,
            fs,
            params["scan_spec"],  #runs forward pass
            activation=params["activation"])

        save_output(output, dset, output_fld, jobid, params["output_tag"],
                    params["chkpt_num"])  #saves tif
        fs._init()  #clear out scanner

    sys.stdout.write("\patch {}: {} min\n".format(
        jobid + 1, round((time.time() - start) / 60, 1)))
    sys.stdout.flush()
Example #5
def main():
    ontology = dag.DAG(config.go_fpath)

    #gene_annotation = utils.get_annotation(config.annotation_fpath, config.filtered_annotation_fpath, ontology.get_root())
    gene_annotation = utils.get_annotation(config.annotation_fpath, config.filtered_annotation_fpath, ontology.get_root())
    print "Number of annotated genes:%d" % len(gene_annotation)

    network = utils.create_network(config.network_fpath)
    print "Number of nodes in network:%d" % network.number_of_nodes()

    '''
Example #6
def main(noeval, **args):

    #args should be the info you need to specify the params
    # for a given experiment, but only params should be used below
    params = fill_params(**args)

    utils.set_gpus(params["gpus"])

    net = utils.create_network(**params)
    if not noeval:
        net.eval()

    utils.log_tagged_modules(params["modules_used"], params["log_dir"],
                             params["log_tag"], params["chkpt_num"])

    #lightsheet mods - input folder contains list of our "big" patches
    input_fld = os.path.join(params["data_dir"],
                             "input_chnks")  #set patches directory
    sys.stdout.write("running inference on: \n{}\n".format(
        os.path.basename(params["data_dir"])))
    sys.stdout.flush()
    output_fld = os.path.join(params["data_dir"],
                              "output_chnks")  #set output directory

    jobid = int(params["jobid"])  #set patch no. to run through cnn

    #find files that need to be processed
    fls = [os.path.join(input_fld, xx) for xx in os.listdir(input_fld)]
    fls.sort()

    #select the file to process for this array job
    if jobid > len(fls) - 1:
        sys.stdout.write("\njobid {} > number of files {}".format(
            jobid, len(fls)))
        sys.stdout.flush()
    else:
        start = time.time()
        dset = fls[jobid]

        fs = make_forward_scanner(dset, **params)

        output = forward.forward(
            net,
            fs,
            params["scan_spec"],  #runs forward pass
            activation=params["activation"])

        save_output(output, dset, output_fld, **params)  #saves tif
        fs._init()  #clear out scanner

        sys.stdout.write("patch {}: {} min\n".format(
            jobid + 1, round((time.time() - start) / 60, 1)))
        sys.stdout.flush()
Example #7
def generate():
    """ Generate a piano midi file """

    # Get all pitch names
    notes = utils.get_nodes_duration_for_prediction(utils.seed_file)

    normalized_input, network_input, _ = utils.prepare_sequences(notes)
    model = utils.create_network(normalized_input)
    # Load the trained weights into the model
    model.load_weights(weights)

    prediction_output = generate_notes(model, network_input)
    create_midi(prediction_output)
Example #8
def main():
    in_args = get_train_args()

    dataloaders, image_datasets = load_data(in_args)

    model = create_network(in_args)

    train_network(in_args, model, dataloaders)

    save_checkpoint(model, in_args, image_datasets)

    test_network(model, dataloaders, in_args)

    return None
Example #9
def start_training(model_class, model_args, model_kwargs, sampler_class,
                   sampler_spec, augmentor_constr, chkpt_num, lr, train_sets,
                   val_sets, data_dir, model_dir, log_dir, tb_train, tb_val,
                   **params):

    #PyTorch Model
    net = utils.create_network(model_class, model_args, model_kwargs)
    train_writer = tensorboardX.SummaryWriter(tb_train)
    val_writer = tensorboardX.SummaryWriter(tb_val)
    monitor = utils.LearningMonitor()

    #Loading model checkpoint (if applicable)
    if chkpt_num != 0:
        utils.load_chkpt(net, monitor, chkpt_num, model_dir, log_dir)

    #DataProvider Stuff
    train_aug = augmentor_constr(True)
    train_sampler = utils.AsyncSampler(
        sampler_class(data_dir,
                      sampler_spec,
                      vols=train_sets,
                      mode="train",
                      aug=train_aug))

    val_aug = augmentor_constr(False)
    val_sampler = utils.AsyncSampler(
        sampler_class(data_dir,
                      sampler_spec,
                      vols=val_sets,
                      mode="val",
                      aug=val_aug))

    loss_fn = loss.BinomialCrossEntropyWithLogits()
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)

    train.train(net,
                loss_fn,
                optimizer,
                train_sampler,
                val_sampler,
                train_writer=train_writer,
                val_writer=val_writer,
                last_iter=chkpt_num,
                model_dir=model_dir,
                log_dir=log_dir,
                monitor=monitor,
                **params)
Example #10
def main(args):
    video, video_writer, frame_count = init_video(args.video_file_path,
                                                  args.output_video_file_path)
    net_id, runtime, input_binding_info, output_binding_info = create_network(
        args.model_file_path, args.preferred_backends)
    output_tensors = ann.make_output_tensors(output_binding_info)
    labels, process_output, resize_factor = get_model_processing(
        args.model_name, video, input_binding_info)
    labels = dict_labels(
        labels if args.label_path is None else args.label_path)

    for _ in tqdm(frame_count, desc='Processing frames'):
        frame_present, frame = video.read()
        if not frame_present:
            continue
        input_tensors = preprocess(frame, input_binding_info)
        inference_output = execute_network(input_tensors, output_tensors,
                                           runtime, net_id)
        detections = process_output(inference_output)
        draw_bounding_boxes(frame, detections, resize_factor, labels)
        video_writer.write(frame)
    print('Finished processing frames')
    video.release(), video_writer.release()
Example #11
def main():
    logging.basicConfig(filename='mfgo.log', level=logging.DEBUG)

    ontology = dag.DAG(config.go_fpath)

    #gene_annotation = utils.get_annotation(config.annotation_fpath, config.filtered_annotation_fpath, ontology.get_root().id)
    gene_annotation = utils.get_slim_annotation(config.annotation_fpath, ontology.get_root().id)

    network = utils.create_network(config.network_fpath)
    print "Number of nodes in network: %d" % network.number_of_nodes()

    # Remove isolated nodes by keeping the largest connected component
    network = nx.connected_component_subgraphs(network)[0]
    print "Number of nodes in network after removing individual genes: %d" % network.number_of_nodes()
    print "Number of edges in network after removing individual genes:%d" % network.number_of_edges()

    # Get annotated genes in the network
    network_annotated_genes = {}
    for node in network.nodes():
        if node in gene_annotation:
            network_annotated_genes[node] = gene_annotation[node]
    print "Number of annotated genes in network:%d" % len(network_annotated_genes)

    '''
Example #12
# load pretrained model
model = models.__dict__[arch](pretrained=True)

# get pretrained model in_features number for the last layer
in_features, last_layer_name = utils.model_info(model)

# freeze pretrained model parameters
if hasattr(model, 'features'):
    for param in model.features.parameters():
        param.requires_grad = False
else:  # resnet
    for param in model.parameters():
        param.requires_grad = False

# create network with custom classifier
model = utils.create_network(model, in_features, last_layer_name, hidden_units,
                             dropout)
print(model)

# set loss
criterion = nn.NLLLoss()

# set optimizer parameters
if hasattr(model, 'classifier'):
    #optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate, weight_decay=weight_decay)
    optimizer = optim.SGD(model.classifier.parameters(),
                          lr=learning_rate,
                          momentum=momentum,
                          weight_decay=weight_decay)

elif hasattr(model, 'fc'):  # resnet
    #optimizer = optim.Adam(model.fc.parameters(), lr=learning_rate, weight_decay=weight_decay)
    # assumed completion (the snippet is cut here): mirror the classifier branch with SGD
    optimizer = optim.SGD(model.fc.parameters(),
                          lr=learning_rate,
                          momentum=momentum,
                          weight_decay=weight_decay)
Example #13
def Trainer(opt):
    # ----------------------------------------
    #      Initialize training parameters
    # ----------------------------------------

    # cudnn benchmark accelerates the network
    cudnn.benchmark = opt.cudnn_benchmark

    # Handle multiple GPUs
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    opt.batch_size *= gpu_num
    opt.num_workers *= gpu_num
    print("Batch size is changed to %d" % opt.batch_size)
    print("Number of workers is changed to %d" % opt.num_workers)

    # Create folders
    save_folder = os.path.join('models', opt.type)
    utils.check_path(save_folder)

    # VGG16 network
    net = utils.create_network(opt)

    # To device
    if opt.multi_gpu == True:
        net = nn.DataParallel(net)
        net = net.cuda()
    else:
        net = net.cuda()

    # Loss functions
    criterion = torch.nn.CrossEntropyLoss().cuda()

    # Optimizers
    optimizer = torch.optim.SGD(net.parameters(), lr = opt.lr, momentum = opt.momentum, weight_decay = opt.weight_decay)

    # Learning rate decrease
    def adjust_learning_rate(optimizer, epoch, opt):
        """Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs"""
        lr = opt.lr * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    
    # Save the model if pre_train == True
    def save_model(net, epoch, opt):
        """Save the model at "checkpoint_interval" and its multiple"""
        modelname = '%s_%s_epoch%d_bs%d_no_norm.pth' % (opt.type, opt.task, epoch, opt.batch_size)
        save_path = os.path.join(save_folder, modelname)
        if opt.multi_gpu == True:
            if epoch % opt.checkpoint_interval == 0:
                torch.save(net.module.state_dict(), save_path)
        else:
            if epoch % opt.checkpoint_interval == 0:
                torch.save(net.state_dict(), save_path)
        print('The trained model is successfully saved at epoch %d' % epoch)
    
    # ----------------------------------------
    #       Initialize training dataset
    # ----------------------------------------

    # Define the dataset
    trainset = dataset.ImageNetTrainSet(opt)
    print('The overall number of images equals to %d' % len(trainset))

    # Define the dataloader
    dataloader = DataLoader(trainset, batch_size = opt.batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
    
    # ----------------------------------------
    #            Training and Testing
    # ----------------------------------------

    # Initialize start time
    prev_time = time.time()

    # Training loop
    for epoch in range(opt.epochs):
        for batch_idx, (data, target) in enumerate(dataloader):

            # Load data and put it to cuda
            data = data.cuda()
            target = target.cuda()

            # Train one iteration
            optimizer.zero_grad()
            output = net(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            # Determine approximate time left
            batches_done = epoch * len(dataloader) + batch_idx
            batches_left = opt.epochs * len(dataloader) - batches_done
            time_left = datetime.timedelta(seconds = batches_left * (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print("\r[Epoch %d/%d] [Batch %d/%d] [Cross-Entropy Loss: %.5f] time_left: %s" %
                ((epoch + 1), opt.epochs, batch_idx, len(dataloader), loss.item(), time_left))

        # Learning rate decrease
        adjust_learning_rate(optimizer, (epoch + 1), opt)

        # Save the model
        save_model(net, (epoch + 1), opt)
Example #14
def Valer(opt):
    # ----------------------------------------
    #     Initialize validation parameters
    # ----------------------------------------

    # VGG16 network
    net = utils.create_network(opt)
    net = net.eval()

    # To device
    if opt.multi_gpu == True:
        net = nn.DataParallel(net)
        net = net.cuda()
    else:
        net = net.cuda()

    # ----------------------------------------
    #       Initialize validation dataset
    # ----------------------------------------

    # Define the dataset
    valset = dataset.ImageNetValSet(opt)
    overall_images = len(valset)
    print('The overall number of images equals to %d' % overall_images)

    # Define the dataloader
    dataloader = DataLoader(valset, batch_size = opt.batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
    
    # ----------------------------------------
    #                Validation
    # ----------------------------------------

    # Initialize accuracy
    overall_top1 = 0
    overall_top5 = 0

    # Validation loop
    for batch_idx, (data, target) in enumerate(dataloader):

        # Load data and put it to cuda
        data = data.cuda()
        target = target.cuda()

        # Run one forward pass (validation only, no gradient updates)
        with torch.no_grad():
            output = net(data)

        # Get Top-5 accuracy
        output = torch.softmax(output, 1)
        maxk = max((1, 5))
        _, pred = output.topk(maxk, 1, True, True)
        pred_top5 = pred.cpu().numpy().squeeze()

        # Compare the result and target label
        if pred_top5[0] == target:
            top1 = 1
        else:
            top1 = 0
        top5 = 0
        for i in range(len(pred_top5)):
            if pred_top5[i] == target:
                top5 = 1
        overall_top1 = overall_top1 + top1
        overall_top5 = overall_top5 + top5

        # Print log
        print("Batch %d: [Top-1: %d] [Top-5: %d]" % (batch_idx, top1, top5))
    
    overall_top1 = overall_top1 / overall_images
    overall_top5 = overall_top5 / overall_images
    print('The accuracy of:', opt.finetune_path)
    print("Overall: [Image numbers: %d] [Top-1 Acc: %.5f] [Top-5 Acc: %.5f]" % (overall_images, overall_top1, overall_top5))
Example #15
parser.add_argument('data_dir', action='store')
parser.add_argument('--save-dir', action='store', default=os.getcwd())
parser.add_argument('--arch', action='store', default='vgg13')
parser.add_argument('--learning_rate',
                    action='store',
                    default=0.002,
                    type=float)
parser.add_argument('--gpu', action='store', default=True)
parser.add_argument('--hidden_units', action='store', default=512, type=int)
parser.add_argument('--epochs', action='store', default=5, type=int)
parser.add_argument('--checkpoint', action='store')

args = parser.parse_args()
print(args)

train_dataset, train_dataloader = utils.load_train_data(args.data_dir +
                                                        "/train/")
valid_dataset, valid_dataloader = utils.load_valid_data(args.data_dir +
                                                        "/valid/")
test_dataset, test_dataloader = utils.load_test_data(args.data_dir + "/test/")

model = utils.create_network(args.arch, args.hidden_units, True)
criterion = utils.create_criterion()
optimizer = utils.create_optimizer(model, args.learning_rate)

model = utils.train_model(model, criterion, optimizer, train_dataloader,
                          valid_dataloader, args.epochs, args.gpu)

utils.save_checkpoint(model, args.arch, args.hidden_units, train_dataset,
                      args.save_dir)
Example #16
def train_network():
    """ Train a Neural Network to generate music """
    notes = utils.get_notes_duration()
    network_input, _, network_output = utils.prepare_sequences(notes)
    model = utils.create_network(network_input)
    train(model, network_input, network_output)
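
The train helper is not shown in this snippet; a minimal hypothetical sketch assuming a Keras model (the checkpoint filename and hyperparameters are illustrative, not from the source):

from keras.callbacks import ModelCheckpoint

# Hypothetical sketch: fit the model built by utils.create_network and
# checkpoint the weights whenever the training loss improves.
def train(model, network_input, network_output):
    checkpoint = ModelCheckpoint("weights-{epoch:02d}-{loss:.4f}.hdf5",
                                 monitor="loss", save_best_only=True)
    model.fit(network_input, network_output,
              epochs=200, batch_size=64, callbacks=[checkpoint])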
Example #17
import utils as ut

ut.create_network()

ut.translate("seed_genes", "sg_entrez")

ut.join_files("sg_entrez", "diamond", "intersection_list")

ut.top_ten("go_ora")
ut.top_ten("path_ora")
Example #18
                    default=0.2)
parser.add_argument('--arch', dest="arch", action='store', default='vgg16')
parser.add_argument('--learning_rate', dest='lr', action='store', default=0.01)
parser.add_argument('--hidden_units',
                    dest='hidden_units',
                    action='store',
                    default=1024)
parser.add_argument('--epochs', dest='epochs', action='store', default=1)
parser.add_argument('--gpu', dest='gpu', action='store', default='gpu')

args = parser.parse_args()
data_dir = args.data_dir
save_dir = args.save_dir
dropout_prob = args.drop_prob
arch = args.arch
learning_rate = args.lr
hidden_units = args.hidden_units
gpu = args.gpu
epochs = args.epochs

trainloader, validloader, testloader = utils.data_load(data_dir)

model, criterion, optimizer = utils.create_network(arch, learning_rate,
                                                   hidden_units, dropout_prob)

model = utils.train_network(model, criterion, optimizer, trainloader,
                            validloader, epochs, gpu)

utils.save_checkpoint(model, data_dir, save_dir, arch, hidden_units,
                      dropout_prob, learning_rate, epochs)
Example #19
def update_network_graph(scatterplot_input: dict, networkgraph_input: dict,
                         elements: list):
    """
    Updates network graph after relevant input in either scatterplot or network graph.
    :param scatterplot_input:
    :param networkgraph_input:
    :param elements:
    """

    fc: pd.DataFrame = filtered_courses

    if scatterplot_input is not None:
        global last_guid
        guid: str = str(scatterplot_input["points"][0]["customdata"])

        if last_guid != guid:
            last_guid = guid
            local_network: tuple = None

            # Show courses for chosen Tadirah technique.
            if guid.startswith("TT"):
                local_network = utils.create_network(
                    fc[(fc.index.isin(tadirah_techniques.loc[[guid]].course_id))],
                    tadirah_objects.head(0), tadirah_techniques.loc[[guid]],
                    tadirah_objects_counts, tadirah_techniques_counts,
                    embedding)

            # Show courses for chosen Tadirah object.
            elif guid.startswith("TO"):
                local_network = utils.create_network(
                    fc[(fc.index.isin(tadirah_objects.loc[[guid]].course_id))],
                    tadirah_objects.loc[[guid]], tadirah_techniques.head(0),
                    tadirah_objects_counts, tadirah_techniques_counts,
                    embedding)

            else:
                local_network = utils.create_network(
                    fc.loc[[int(guid)]], tadirah_objects, tadirah_techniques,
                    tadirah_objects_counts, tadirah_techniques_counts,
                    embedding)

            return local_network[0].tolist() + local_network[1].tolist()

    if networkgraph_input is not None:
        global node_extensions

        if networkgraph_input["id"] not in node_extensions:
            related_wiki = find_related_wiki(networkgraph_input, elements)
            node_extensions[networkgraph_input["id"]] = set()

            for wiki in related_wiki:
                uuid_wiki: str = str(uuid.uuid1())
                node_id: str = 'W{}'.format(uuid_wiki)
                elements.extend([
                    {
                        'data': {
                            'id': node_id,
                            'label': wiki
                        }
                    },
                    {
                        'data': {
                            'target': node_id,
                            'source': networkgraph_input["id"],
                            'weight': 0.5,
                            # Assign anonymous ID to edge.
                            'id': str(uuid.uuid1())
                        }
                    }
                ])
                node_extensions[networkgraph_input["id"]].add(node_id)

        elif networkgraph_input["id"] in node_extensions:
            # Collapse node.
            utils.collapse_node_in_network_graph(elements,
                                                 networkgraph_input["id"],
                                                 node_extensions, False)

        return elements

    else:
        return []
Example #20
def simulate(conn, output_dir, fname_peaks, fname_lfps_prefix, dt, n_runs, total_time, temperature, with_v1_l4, with_v1_l6, with_trn, input, con_input_lgn,
             n_e_lgn, n_i_lgn, n_e_l6, n_i_l6, n_e_l4, n_i_l4, n_trn,
             threshold, delay, delay_distbtn_e_l6_lgn, delay_e_l4_e_lgn, delay_e_lgn_i_l4, delay_e_lgn_e_l4, delay_e_lgn_e_l6,
             delay_e_lgn_trn, delay_e_l4_trn, delay_distbtn_e_l6_trn, delay_e_lgn_i_l6,
             lgn_params, l4_params, l6_params, trn_params, w_e_lgn_trn, w_trn_e_lgn, w_e_l6_trn, w_e_l4_e_l6,
             w_e_lgn_e_l4, w_e_l4_e_lgn, w_e_l6_e_lgn, w_e_lgn_e_l6, w_e_lgn_i_l6, w_e_lgn_i_l4, w_e_l4_trn,
             connect_e_lgn_e_l4, connect_e_lgn_i_l4, connect_e_l4_e_lgn, connect_e_lgn_i_l6, connect_e_lgn_e_l6, connect_e_l6_e_lgn, connect_e_l4_trn, connect_e_l6_trn,
             connect_e_lgn_trn, connect_trn_e_lgn, connect_e_l4_e_l6):

    start = np.empty(shape=0)
    bf_plot = np.empty(shape=0)
    af_plot = np.empty(shape=0)
    end = np.empty(shape=0)

    h.celsius = temperature
    print "* * * Simulating %d runs * * *" % n_runs
    h.tstop = total_time
    for n_sim in range(n_runs):
        print "#%d: Constructing circuits..." % (n_sim + 1)
        start = np.append(start, timer())
        # creating LGN network
        i_lgn, i_lgn_rec = create_network(n_i_lgn)
        e_lgn, e_lgn_rec = create_network(n_e_lgn)

        # create connections in LGN
        e_lgn_e_lgn_syn = e_net_connect(e_lgn, e_lgn, threshold, delay, lgn_params['w_e_lgn_e_lgn'], 1)
        i_lgn_i_lgn_syn = i_net_connect(i_lgn, i_lgn, threshold, delay, lgn_params['w_i_lgn_i_lgn'], 1)
        i_lgn_e_lgn_syn = i_net_connect(i_lgn, e_lgn, threshold, lgn_params['delay_i_e'], lgn_params['w_i_lgn_e_lgn'], 1)
        e_lgn_i_lgn_syn = e_net_connect(e_lgn, i_lgn, threshold, lgn_params['delay_e_i'], lgn_params['w_e_lgn_i_lgn'], 1)  # weight should be set to zero

        e_l4, e_l4_rec = create_network(n_e_l4)
        i_l4, i_l4_rec = create_network(n_i_l4)
        if with_v1_l4:
            # create connections in V1 L4
            e_l4_e_l4_sin = e_net_connect(e_l4, e_l4, threshold, delay, l4_params['w_e_l4_e_l4'], l4_params['p_e_e'])
            i_l4_i_l4_sin = i_net_connect(i_l4, i_l4, threshold, delay, l4_params['w_i_l4_i_l4'], l4_params['p_i_i'])
            e_l4_i_l4_sin = e_net_connect(e_l4, i_l4, threshold, delay, l4_params['w_e_l4_i_l4'], l4_params['p_e_i'])
            i_l4_e_l4_sin = i_net_connect(i_l4, e_l4, threshold, delay, l4_params['w_i_l4_e_l4'], l4_params['p_i_e'])


            # extrinsic connections

            # Population 1) 15 LGN E cells connect to 15 V1 L4 E cells
            # Population 2) 5 LGN E cells connect to 5 V1 L4 I cells
            #
            # Population 1 and population 2 are different
            #
            # Hirsch et al., 1998
            if connect_e_lgn_e_l4:
                # connections from Glutamatergic neurons of network LGN to network V1 L4
                e_lgn_e_l4_syn = partial_e_net_connect(e_lgn, e_l4, 2./4, 1, 2./4, 1, threshold, delay_e_lgn_e_l4, w_e_lgn_e_l4)
                # e_lgn_e_l4_syn = topographically_e_connect(e_lgn, e_l4, 0, 1, threshold, delay_e_lgn_e_l4, w_e_lgn_e_l4)

            if connect_e_l4_e_lgn:
                # TODO: feedback connections are only of 3/4 of neurons?
                # connections from Glutamatergic neurons of network 2 (V1) to network 1 (LGN)
                e_l4_e_lgn_syn = partial_e_net_connect(e_l4, e_lgn, 1./4, 1, 1./4, 1, threshold, delay_e_l4_e_lgn, w_e_l4_e_lgn)

            if connect_e_lgn_i_l4:
                # connections from Glutamatergic neurons of network (LGN) to network V1 L4
                e_lgn_i_l4_syn = partial_e_net_connect(e_lgn, i_l4, 0, 2./4, 0, 2./4, threshold, delay_e_lgn_i_l4, w_e_lgn_i_l4)
                # e_lgn_i_l4_syn = topographically_e_connect(e_lgn, i_l4, 0, 1./4, threshold, delay_e_lgn_i_l4, w_e_lgn_i_l4)

        i_l6, i_l6_rec = create_network_L6(n_i_l6)
        e_l6, e_l6_rec = create_network_L6(n_e_l6)
        if with_v1_l6:
            # create connections in V1 L6
            e_l6_e_l6_sin = e_net_connect(e_l6, e_l6, threshold, delay, l6_params['w_e_l6_e_l6'], l6_params['p_e_e'])
            i_l6_i_l6_sin = i_net_connect(i_l6, i_l6, threshold, delay, l6_params['w_i_l6_i_l6'], l6_params['p_i_i'])
            e_l6_i_l6_syn = e_net_connect(e_l6, i_l6, threshold, delay, l6_params['w_e_l6_i_l6'], l6_params['p_e_i'])
            i_l6_e_l6_syn = i_net_connect(i_l6, e_l6, threshold, delay, l6_params['w_i_l6_e_l6'], l6_params['p_i_e'])

            # connections from V1 input (L4) layer to L6
            if connect_e_l4_e_l6:
                e_l4_e_l6_sin = e_net_connect(e_l4, e_l6, threshold, 1, w_e_l4_e_l6, 1)

            # ALL-to-ALL connections of feedback
            if connect_e_l6_e_lgn:
                e_l6_e_lgn_sin = e_ct_net_connect_delay_dist(e_l6, e_lgn, threshold, delay_distbtn_e_l6_lgn, w_e_l6_e_lgn)

            # TODO: Connectivity as Hirsch
            if connect_e_lgn_e_l6:
                e_lgn_e_l6_syn = e_net_connect(e_lgn, e_l6, threshold, delay_e_lgn_e_l6, w_e_lgn_e_l6, 1)

            # TODO: Connectivity as Hirsch
            if connect_e_lgn_i_l6:
                e_lgn_i_l6_syn = e_net_connect(e_lgn, i_l6, threshold, delay_e_lgn_i_l6, w_e_lgn_i_l6, 1)

        # create trn neurons (inhibitory only)
        trn, trn_rec = create_network(n_trn)
        if with_trn:
            trn_trn_syn = i_net_connect(trn, trn, threshold, trn_params['delay_i_i'], trn_params['w_trn_trn'],
                                        trn_params['p_i_i'])

            # connections from Glutamatergic neurons of network V1 L4 to trn
            if with_v1_l4 and connect_e_l4_trn:
                e_l4_trn_syn = e_net_connect(e_l4, trn, threshold, delay_e_l4_trn, w_e_l4_trn, 1)

            if with_v1_l6 and connect_e_l6_trn:
                e_l6_trn_syn = e_net_connect_delay_dist(e_l6, trn, threshold, delay_distbtn_e_l6_trn, w_e_l6_trn, 1)

            if connect_e_lgn_trn:
                # connections from Glutamatergic neurons of LGN to TRN
                # ALL-to-ALL
                e_lgn_trn_syn = e_net_connect(e_lgn, trn, threshold, delay_e_lgn_trn, w_e_lgn_trn, 1)

                # # topographic
                # topographically_connect(e_lgn, trn, 0, 1, threshold, delay_e_lgn_trn, w_e_lgn_trn)

            if connect_trn_e_lgn:
                # ALL-to-ALL
                trn_e_lgn_sin = i_net_connect(trn, e_lgn, threshold, delay, w_trn_e_lgn, 1)

        # generate inputs to LGN
        netStim = list()
        i_stims = list()
        e_stims = list()
        stim_rec = h.Vector()
        for stim_i in range(0, input['nstims']):
            netStim.append(h.NetStimPois(input['position']))
            netStim[stim_i].start = 0
            netStim[stim_i].mean = input['stimrate']  # 100 = 10 Hz, 10 = 100 Hz, 1 = 1000Hz, 5 = 200 Hz, 6 = 150 Hz
            netStim[stim_i].number = 0
            if stim_i < n_i_lgn:
                i_stims.append(h.NetCon(netStim[stim_i], i_lgn[stim_i].synE, con_input_lgn['gaba_threshold'],
                                        con_input_lgn['gaba_delay'], con_input_lgn['gaba_weight']))
            if stim_i < n_e_lgn:
                e_stims.append(h.NetCon(netStim[stim_i], e_lgn[stim_i].synE, con_input_lgn['glut_threshold'],
                                        con_input_lgn['glut_delay'], con_input_lgn['glut_weight']))
        e_stims[0].record(stim_rec)  # measure poisson input #0 to LGN Excitatory Cell #0

        timeaxis = h.Vector()
        timeaxis.record(h._ref_t)
        print "#%d: Running simulation..." % (n_sim + 1)
        h.run()
        bf_plot = np.append(bf_plot, timer())
        mean_lgn, mean_trn, mean_v1_l4, mean_v1_l6 = plot_all(conn, output_dir, fname_peaks, n_sim, dt, timeaxis, stim_rec, with_v1_l4, with_v1_l6, with_trn,
                                                              e_lgn_rec, i_lgn_rec, trn_rec, e_l4_rec, i_l4_rec, e_l6_rec, i_l6_rec,
                                                              n_e_lgn, n_i_lgn, n_trn, n_e_l4, n_i_l4, n_e_l6, n_i_l6)
        af_plot = np.append(af_plot, timer())
        ofname = fname_lfps_prefix + str(n_sim) + ".txt"

        n = len(timeaxis)
        indx = np.arange(0, n, 40)  # store one in every 40 values

        lfp_lgn = np.array(mean_lgn)
        lfp_trn = np.array(mean_trn)
        lfp_l4 = np.array(mean_v1_l4)
        lfp_l6 = np.array(mean_v1_l6)
        time = np.array(timeaxis)
        np.savetxt(ofname, (lfp_lgn[indx], lfp_trn[indx], lfp_l4[indx], lfp_l6[indx], time[indx]))
        end = np.append(end, timer())

        print "Progress: %d runs simulated %d runs missing" % (n_sim + 1, n_runs - n_sim - 1)
    print_time_stats(start, bf_plot, af_plot, end)
Example #21
    variables_dict = load_from_json(predictors, response, verbose=True)

    # Split the data into training and test sets
    xtrain, xtest, ytrain, ytest, ytrain_hot, ytest_hot = create_model_data(
        variables_dict, predictors, response, model_type='Both')
    # Iterate through regression and classification models
    for model_type in ['Regression', 'Classification']:
        print('Beginning {} Model for {} Predictions'.format(
            model_type, response))
        print('Setting up TensorBoard')

        # Create the model structure, define the cost optimization functions
        x = tf.placeholder(tf.float32, [None, len(predictors)], name='x')
        if model_type == 'Classification':
            y = tf.placeholder(tf.float32, [None, 2], name='y')
            output_layer = create_network(x, hidden_layers, num_classes=2)
            cost = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=y,
                                                        logits=output_layer))
        else:
            y = tf.placeholder(tf.float32, name='y')
            output_layer = tf.transpose(
                create_network(x, hidden_layers, num_classes=1))
            cost = tf.reduce_mean(tf.square(output_layer - y))
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

        # Train the model
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            print('Starting Model Training')
            for epoch in range(num_epochs):
Example #22
            running_loss += loss.item()
        else:
            validation_loss, validation_accuracy = test_network(
                model, validation_loader, device)
            print(
                f"Epoch {epoch+1}/{epochs}.. "
                f"Train loss: {running_loss/len(train_loader):.3f}.. "
                f"Validation loss: {validation_loss/len(validation_loader):.3f}.. "
                f"Validation accuracy: {validation_accuracy/len(validation_loader):.3f}"
            )
            running_loss = 0
    return model, optimizer


train_data, validation_data = create_data(data_dir)
train_loader, validation_loader = create_loaders(train_data, validation_data)
print('Data is loaded from {}'.format(data_dir))

model, criterion, optimizer = create_network(arch, device, learning_rate,
                                             dropout, hidden_units)

print('Starting to train network')
model, optimizer = train_network(model, device, optimizer, train_loader,
                                 validation_loader, epochs)
print('Network training finished')

if save_dir != None:
    save_checkpoint(save_dir, model, arch, train_data, learning_rate, dropout,
                    hidden_units)
    print('Checkpoint is saved to {}'.format(save_dir))
Example #23
                nterms = gene_annotation[node]
                sim = compute_gene_sim_total(nterms, terms)
                #sim = compute_gene_sim_max(nterms, terms)
                non_sim_avg += sim
        non_sim_avg /= count
        print "%s, %f, %f" % (gene, sim_avg, non_sim_avg)


if __name__ == "__main__":
    dag = DAG(config.go_fpath)

    gene_annotation = utils.get_annotation(config.annotation_fpath, config.filtered_annotation_fpath, dag.get_root().id)

    term_ic = utils.calculate_ic(gene_annotation, dag, config.ic_fpath)

    network = utils.create_network(config.network_fpath)
    # Remove unannotated genes from the network
    for node in network.nodes():
        if not node in gene_annotation:
            network.remove_node(node)
    # Remove isolated nodes by keeping the largest connected component
    network = nx.connected_component_subgraphs(network)[0]

    sim_cache = utils.read_sim(config.simcache_fpath)

    #compute_term_in_neighbour_ratio()
    #compute_avg_term_num()

    compute_avg_sim()

Example #24
    predictors, variables_dict[current_response], random_state=33)

print('Finished Splitting Data')

# Parameters
learning_rate = 0.01
training_epochs = 10
# Network Parameters
hidden_layer_nodes = [100, 100, 100]

# tf Graph input
x = tf.placeholder("float", [None, len(xtrain[0])], name='x')
y = tf.placeholder("float", [None])

# Construct model
pred = tf.transpose(create_network(x, hidden_layer_nodes, num_classes=1))

# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(training_epochs):
        # Run optimization op (backprop) and cost op (to get loss value)
        sess.run([optimizer, cost, pred], feed_dict={x: xtrain, y: ytrain})
        print('Completed Epoch {} of {}'.format(epoch + 1, training_epochs))
Example #25
xtrain, xtest, ytrain, ytest = train_test_split(predictors, variables_dict[response], random_state=33)

print('Finished Splitting Data')
ytrain = np.where(np.array(ytrain) >= 0, 1, 0)
ytest = np.where(np.array(ytest) >= 0, 1, 0)
ytrain_hot = np.zeros((len(ytrain), 2))
ytrain_hot[np.arange(len(ytrain)), ytrain] = 1

ytest_hot = np.zeros((len(ytest), 2))
ytest_hot[np.arange(len(ytest)), ytest] = 1

x = tf.placeholder(tf.float32, [None, len(predictor_names)], name='x')
y = tf.placeholder(tf.float32, [None, 2])

output_layer = create_network(x, [33, 33, 33], num_classes=2)

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output_layer))
train_step = tf.train.AdamOptimizer(.01).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()


print('Starting Model Training')

num_epochs = 33
for epoch in range(num_epochs):
    sess.run(train_step, feed_dict={x: xtrain, y: ytrain_hot})
    print('Completed Epoch {} of {}'.format(epoch + 1, num_epochs))

correct_prediction = tf.equal(tf.argmax(output_layer, 1), tf.argmax(y, 1))
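
The snippet ends before correct_prediction is evaluated; a plausible continuation following the standard TF1 pattern (the accuracy computation below is an assumption, not part of the source):

# Assumed continuation: average the boolean matches and evaluate the
# accuracy on the one-hot test labels built above.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Test accuracy: {:.4f}'.format(
    sess.run(accuracy, feed_dict={x: xtest, y: ytest_hot})))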
Example #26
def create():
    """ Create a set of Openstack resources.
    If the required resources do not exist, new ones will be created.
    """
    if not conn.network.find_network(my_network_name):
        netw = utils.create_network(conn, my_network_name)
    else:
        netw = conn.network.find_network(my_network_name)
        print("Network {} exists already.".format(my_network_name))

    if not conn.network.find_subnet(my_subnet_name):
        subn = utils.create_subnet(conn, netw, my_subnet_name, my_cidr)
    else:
        subn = conn.network.find_subnet(my_subnet_name)
        print("Subnet {} exists already.".format(my_subnet_name))

    if not conn.network.find_router(my_router_name):
        rout = utils.create_router(conn, my_router_name, public_net)
        utils.add_router_interface(conn, rout, subn)
    else:
        rout = conn.network.find_router(my_router_name)
        print("Router {} exists already.".format(my_router_name))

    """ Check whether the provided resources exist. If they do not exist
    prompt message will show and servers creating will not continue.
    Otherwise servers will be created.
    """
    if not (image and flavour and keypair and security_group):
        print(
            "Please make sure the provided image {}, flavour {}, keypair {} or security_group {} exists already.".format(
                IMAGE, FLAVOUR, KEYPAIR, SECURITYGROUP
            )
        )
    else:
        # Create server one by one
        for server in server_list:
            n_serv = conn.compute.find_server(server)
            if not n_serv:
                print("------------ Creating server {}... --------".format(server))
                n_serv = conn.compute.create_server(
                    name=server,
                    image_id=image.id,
                    flavor_id=flavour.id,
                    networks=[{"uuid": conn.network.find_network(my_network_name).id}],
                    key_name=keypair.name,
                    security_groups=[{"sgid": security_group.id}],
                )
                conn.compute.wait_for_server(n_serv, wait=180)
                print("Server {} is created successfully".format(n_serv))
            else:
                print("Server {} exists already".format(server))

        # Checking, creating and attaching floating ip to web server
        if not conn.get_server(name_or_id=server_list[0])["interface_ip"]:
            print(
                "-------- Creating and attaching floating ip to server {} --------".format(
                    server_list[0]
                )
            )
            conn.compute.wait_for_server(n_serv)
            conn.create_floating_ip(
                network=PUBLICNET, server=conn.compute.find_server(server_list[0])
            )
        else:
            print("Floating ip is attached to server {} already".format(server_list[0]))