Code example #1
def train(gpu, region_id, mode, checkpoint_path):
    """ main training/evaluation method
    """
    # ------------
    # model & data
    # ------------
    params = cf.get_params(region_id=region_id, collapse_time=True)
    data = DataModule(params['data_params'], params['training_params'])
    model = load_model(Model, params, checkpoint_path)

    # ------------
    # trainer
    # ------------
    trainer = get_trainer(gpu)
    print_training(params['data_params'])

    # ------------
    # train & final validation
    # ------------
    if mode == 'train':
        print("-----------------")
        print("-- TRAIN MODE ---")
        print("-----------------")
        trainer.fit(model, data)

    # validate
    do_test(trainer, model, data.val_dataloader())
Code example #2
def add_slot_embs_to_slu_embs(slot_embs_file, slu_embs_file):
    import pickle
    import numpy as np
    from src.slu.datareader import datareader
    from src.utils import init_experiment
    from config import get_params

    with open(slot_embs_file, "rb") as f:
        slot_embs_dict = pickle.load(f)
    slu_embs = np.load(slu_embs_file)

    params = get_params()
    logger = init_experiment(params, logger_filename=params.logger_filename)
    _, vocab = datareader(use_label_encoder=True)

    new_slu_embs = np.zeros(
        (vocab.n_words, 400))  # 400: word + char level embs

    # copy previous embeddings
    prev_length = len(slu_embs)
    new_slu_embs[:prev_length, :] = slu_embs

    for slot_name in slot_list:
        emb = None
        index = vocab.word2index[slot_name]
        if index < prev_length: continue
        for domain, slot_embs in slot_embs_dict.items():
            slot_list_based_on_domain = domain2slot[domain]
            if slot_name in slot_list_based_on_domain:
                slot_index = slot_list_based_on_domain.index(slot_name)
                emb = slot_embs[slot_index]
                break
        assert emb is not None
        new_slu_embs[index] = emb

    np.save("../data/snips/emb/slu_word_char_embs_with_slotembs.npy",
            new_slu_embs)
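Note that slot_list and domain2slot are not defined in this snippet; they are presumably module-level globals of the surrounding project (the full slot inventory and a domain-to-slot-names mapping). A purely hypothetical sketch of their expected shapes, with made-up domain and slot names:

# Hypothetical stand-ins for the module-level globals used above.
# The domain and slot names are illustrative only.
domain2slot = {
    "PlayMusic": ["music_item", "artist", "playlist"],
    "GetWeather": ["city", "timeRange", "condition_temperature"],
}
slot_list = sorted({s for slots in domain2slot.values() for s in slots})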
Code example #3
def gen_embs_for_vocab():
    import numpy as np
    from src.datareader import datareader
    from src.utils import load_embedding, init_experiment
    from config import get_params
    params = get_params()
    logger = init_experiment(params, logger_filename=params.logger_filename)

    _, vocab = datareader()
    embedding = load_embedding(vocab, 300, "/data/sh/glove.6B.300d.txt",
                               "/data/sh/coachdata/snips/emb/oov_embs.txt")
    np.save("/data/sh/coachdata/snips/emb/slu_embs.npy", embedding)
Code example #4
def gen_embs_for_vocab():
    import numpy as np
    from src.slu.datareader import datareader
    from src.utils import load_embedding, init_experiment
    from config import get_params

    params = get_params()
    logger = init_experiment(params, logger_filename=params.logger_filename)

    _, vocab = datareader()
    embedding = load_embedding(vocab, 300, "PATH_OF_THE_WIKI_EN_VEC",
                               "../data/snips/emb/oov_embs.txt")
    np.save("../data/snips/emb/slu_embs.npy", embedding)
Code example #5
File: 4-inference.py, Project: iarai/weather4cast
def get_data_iterator(region_id, data_path, splits_path, data_split='test', collapse_time=True, 
                      batch_size=32, shuffle=False, num_workers=0):
    """ Creates an iterator for data in region 'region_id' for the days in `splits_path`
    """
    
    params = cf.get_params(region_id=region_id, data_path=data_path, splits_path=splits_path)
    params['data_params']['collapse_time'] = collapse_time

    ds = create_dataset(data_split, params['data_params'])
    dataloader = DataLoader(ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    
    data_splits, test_sequences = data_utils.read_splits(params['data_params']['train_splits'], params['data_params']['test_splits'])
    test_dates = data_splits[data_splits.split=='test'].id_date.sort_values().values

    return iter(dataloader), test_dates, params
Code example #6
def build_coefs(n_tasks,
                n_sources=1,
                overlap=100,
                seed=None,
                positive=False,
                hemi="lh",
                illustration=False,
                labels_type="any",
                dataset="camcan",
                spacing="ico4"):
    params = cfg.get_params(dataset)
    data_path = params["data_path"]
    labels = np.load(data_path + "label/labels-%s-%s-%s.npy" %
                     (labels_type, spacing, hemi))
    n_labels, n_features = labels.shape
    rng = np.random.RandomState(seed)
    # simulate random activation (one per label)
    # n_tasks_max is assumed to be a module-level constant (maximum number of tasks)
    coefs = np.zeros((n_features, n_tasks_max))
    if overlap < 0. or overlap > 100:
        raise ValueError("Overlap must be in 0-100%. Got %s." % overlap)

    frac = Fraction(overlap, 100)
    denom = frac.denominator
    numer = frac.numerator
    for l in range(n_sources):
        labels_idx, = np.where(labels[l])
        n_source_in_label = len(labels_idx)
        choices = np.arange(n_source_in_label)
        permutation = rng.permutation(choices)
        ido = permutation[0]
        if positive:
            sign = 1
        else:
            sign = (-1)**rng.randint(2)
        idx_tasks_o = np.arange(n_tasks_max)
        mod = idx_tasks_o % denom < numer
        idx_tasks_o = idx_tasks_o[mod]
        vals = 10. * (2 + rng.rand(len(idx_tasks_o)))
        coefs[labels_idx[ido], idx_tasks_o] = sign * vals
        idx_no = list(set(np.arange(n_tasks_max)) - set(idx_tasks_o))
        no = len(idx_no)
        if no:
            choices_no = permutation[1:no + 1]
            vals = 10. * (2 + rng.rand(no))
            idno = rng.choice(choices_no, size=no)
            coefs[labels_idx[idno], np.array(idx_no)] = sign * vals
    coefs = coefs[:, :n_tasks]
    return coefs
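The overlap handling above boils down to a simple modular trick: Fraction(overlap, 100) is reduced, and a task index is assigned the shared source whenever index % denominator < numerator. A standalone sketch of just that selection step (n_tasks_max here is a made-up stand-in for the module-level constant assumed by the function):

from fractions import Fraction
import numpy as np

n_tasks_max = 20
overlap = 75                      # percent of tasks sharing the source
frac = Fraction(overlap, 100)     # reduces to 3/4
idx = np.arange(n_tasks_max)
shared = idx[idx % frac.denominator < frac.numerator]
print(shared)                     # 15 of the 20 task indices, evenly spread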
Code example #7
def train(model, trainloader):
    params = get_params()
    ITER_LOG = params.ITER_LOG

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    now = datetime.datetime.now()
    optimizer = torch.optim.AdamW(model.parameters(), lr=params.LEARNING_RATE)
    criterion = nn.BCELoss()
    losses = []
    for epoch in tqdm(range(params.N_EPOCHS)):
        loss_log = 0.0
        y_true = []
        y_pred = []
        for i, data in enumerate(trainloader, 0):
            mels, labels = data[0].to(device), data[1].to(device)

            pred = model(mels.unsqueeze(-1).permute(0, 3, 1, 2))
            optimizer.zero_grad()
            loss = criterion(pred.float(), labels.float())
            loss_log += loss.item()
            loss.backward()
            optimizer.step()
            pred = np.round(pred.to('cpu').detach())
            target = np.round(labels.to('cpu').detach())
            y_pred.extend(pred.tolist())
            y_true.extend(target.tolist())
            if i % ITER_LOG == ITER_LOG - 1:
                # wandb.log({"loss": loss_log / ITER_LOG})

                print('[%d, %5d] Running loss: %.3f' %
                      (epoch + 1, i + 1, loss_log / ITER_LOG))
                losses.append(loss_log / ITER_LOG)
                loss_log = 0.0

                print('time:', datetime.datetime.now() - now)
                now = datetime.datetime.now()

                # print('Acc:\t', accuracy(pred.to('cpu').detach(), labels.to('cpu').detach()).item())

                print('Acc:\t', accuracy_score(y_true, y_pred))

    plt.plot(losses)  # one point per logging interval (every ITER_LOG batches)
    plt.show()

    print('Finished Training')

    return model
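A self-contained toy version of the loss/metric bookkeeping used above (dummy tensors only; nn.BCELoss assumes the model output has already passed through a sigmoid, which is why the predictions are probabilities in [0, 1]):

import torch
from torch import nn
from sklearn.metrics import accuracy_score

pred = torch.tensor([0.9, 0.2, 0.7])    # sigmoid outputs
labels = torch.tensor([1.0, 0.0, 0.0])
loss = nn.BCELoss()(pred, labels)
acc = accuracy_score(labels.round().tolist(), pred.round().tolist())
print(loss.item(), acc)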
Code example #8
File: main.py, Project: kehuantiantang/Beta-VAE
def main(args):
    seed = args.seed
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)

    params = get_params(args.dataset)
    params.update(vars(args))
    pprint.pprint(params)

    net = Solver(params)

    if args.train:
        net.train()
    else:
        net.viz_traverse(0)
Code example #9
def main():
    params = get_params()
    set_random_seed(params.RANDOM_SEED)
    parse_data()
    data = DatasetNorm('cutted_data')
    train_set, test_set = torch.utils.data.random_split(
        data, [len(data) - 100, 100])
    trainloader = DataLoader(dataset=train_set,
                             batch_size=params.BATCH_SIZE,
                             shuffle=True,
                             num_workers=8)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    tcnn = TempoCNN().to(device)

    wandb.init(project="tcnn")
    config = wandb.config
    config.learning_rate = 0.001
    wandb.watch(tcnn)

    if not params.LOAD_MODEL:
        model = train(tcnn, trainloader)
        save_model(model)
    else:
        model = load_model().to(device)

    testloader = DataLoader(dataset=test_set,
                            batch_size=params.BATCH_SIZE,
                            shuffle=True)

    iters = 0
    loss = 0.0
    cr_loss = nn.BCELoss()
    model.eval()
    for i, data in enumerate(testloader, 0):
        mels, labels = data[0].to(device), data[1].to(device)
        pred = model(mels.unsqueeze(-1).permute(0, 3, 1, 2)).to('cpu').detach()
        res = accuracy(pred, labels)
        print(res)

        loss += cr_loss(pred.float(), labels.float().to('cpu').detach()).item()
        iters += 1

    print(loss / iters)
Code example #10
def combine_word_with_char_embs_for_vocab(wordembs_file):
    import numpy as np
    import torchtext
    from src.slu.datareader import datareader
    from src.utils import init_experiment
    from config import get_params
    char_ngram_model = torchtext.vocab.CharNGram()

    params = get_params()
    logger = init_experiment(params, logger_filename=params.logger_filename)

    _, vocab = datareader()
    embedding = np.load(wordembs_file)

    word_char_embs = np.zeros((vocab.n_words, 400))
    for index, word in vocab.index2word.items():
        word_emb = embedding[index]
        char_emb = char_ngram_model[word].squeeze(0).numpy()
        word_char_embs[index] = np.concatenate((word_emb, char_emb), axis=-1)

    np.save("../data/snips/emb/slu_word_char_embs.npy", word_char_embs)
Code example #11
def build_dataset(coefs,
                  std=0.2,
                  seed=None,
                  same_design=False,
                  randomize_subjects=False,
                  hemi="lh",
                  dataset="camcan",
                  age_min=0,
                  age_max=30,
                  spacing="ico4"):
    """Build multi-task regression data."""
    params = cfg.get_params(dataset)
    data_path = params["data_path"]
    rng = np.random.RandomState(seed)
    n_features, n_tasks = coefs.shape
    subjects = cfg.get_subjects_list(dataset, age_min, age_max)[:n_tasks]
    if randomize_subjects:
        subjects = rng.permutation(subjects)
    if same_design:
        s_id = rng.randint(0, n_tasks)
        subjects = n_tasks * [subjects[s_id]]
    x_names = [
        data_path + "leadfields/X_%s_%s_%s.npy" % (s, hemi, spacing)
        for s in subjects
    ]
    X = np.stack([np.load(x_name) for x_name in x_names], axis=0)
    X = X.astype(np.float64)
    y = [x.dot(coef) for x, coef in zip(X, coefs.T)]
    y = np.array(y)
    y *= 1e4  # get Y in fT/cm
    n_samples = y.shape[1]
    std *= np.std(y, axis=1).mean()
    # n_tasks_max is assumed to be a module-level constant (maximum number of tasks)
    noise = std * rng.randn(n_tasks_max, n_samples)
    y += noise[:n_tasks]
    X = X[:n_tasks]
    y = y[:n_tasks]

    return X, y
Code example #12
                                     seed=TP['shuffle_seed'])
    X_train = data_set['x_train']
    X_test = data_set['x_test']
    Y_train = data_set['y_train']
    Y_test = data_set['y_test']
    total_counts = np.sum(Y_train, axis=0) + np.sum(Y_test, axis=0)

    train_gen = ImageDataGenerator(horizontal_flip=True,
                                   vertical_flip=True,
                                   rotation_range=180,
                                   zoom_range=(1, 1.2),
                                   preprocessing_function=xcept_preproc)
    test_gen = ImageDataGenerator(preprocessing_function=xcept_preproc)

    trials = Trials()
    algo = partial(tpe.suggest, n_startup_jobs=TP['n_rand_hp_iters'])
    argmin = fmin(xcept_net,
                  space=get_params(MP, TP),
                  algo=algo,
                  max_evals=TP['n_total_hp_iters'],
                  trials=trials)

    end_time = dt.now()
    print_end_details(start_time, end_time)
    print("Evalutation of best performing model:")
    print(trials.best_trial['result']['loss'])

    with open(op.join(ckpt_dir, 'trials_{}.pkl'.format(start_time)),
              "wb") as pkl_file:
        pickle.dump(trials, pkl_file)
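The same hyperopt pattern (Trials, tpe.suggest with a fixed number of random start-up trials, fmin) in a self-contained toy form; the search space and objective below are illustrative only:

from functools import partial
from hyperopt import Trials, fmin, hp, tpe

def objective(args):
    return (args['x'] - 3.0) ** 2   # loss to minimize

trials = Trials()
algo = partial(tpe.suggest, n_startup_jobs=5)
best = fmin(objective, space={'x': hp.uniform('x', -10, 10)},
            algo=algo, max_evals=50, trials=trials)
print(best, trials.best_trial['result']['loss'])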
Code example #13
File: train_xcept.py, Project: ryanenian/ml-hv-grid
            print('For {}ing, found {} {} images'.format(fold, n_fnames, sub_fold))

            if fold == 'test':
                total_test_images += n_fnames
    if TP['steps_per_test_epo'] is None:
        TP['steps_per_test_epo'] = int(np.ceil(total_test_images /
                                               DF['flow_from_dir']['batch_size']) + 1)

    ###################################
    # Set up generators
    ###################################
    train_gen = ImageDataGenerator(preprocessing_function=xcept_preproc,
                                   **DF['image_data_generator'])
    test_gen = ImageDataGenerator(preprocessing_function=xcept_preproc)

    ############################################################
    # Run training with hyperparam optimization (using hyperopt)
    ############################################################
    trials = Trials()
    algo = partial(tpe.suggest, n_startup_jobs=TP['n_rand_hp_iters'])
    argmin = fmin(xcept_net, space=get_params(MP, TP), algo=algo,
                  max_evals=TP['n_total_hp_iters'], trials=trials)

    end_time = dt.now()
    print_end_details(start_time, end_time)
    print("Evalutation of best performing model:")
    print(trials.best_trial['result']['loss'])

    with open(op.join(ckpt_dir, 'trials_{}.pkl'.format(start_time)), "wb") as pkl_file:
        pickle.dump(trials, pkl_file)
Code example #14
# Save the word list to a file, one word per line
def save_words(words, path):
    with open(path, 'w', encoding='UTF-8') as of:
        for w in words:
            of.write(str(w) + '\n')


# Word-vector training helpers
def word2vec_proc(params):
    line_sentence = LineSentence(config.words_file)
    model = word2vec.Word2Vec(line_sentence, size=params['vector_dim'], window=params['window_size'],
                              min_count=params['min_frequency'], workers=params['workers'], sg=params['use_skip_gram'],
                              hs=params['use_hierarchical_softmax'], negative=params['negative_size'],
                              iter=params['pre_proc_epochs'])
    return model


def fasttext_proc(params):
    line_sentence = LineSentence(config.words_file)
    model = fasttext.FastText(line_sentence, size=params['vector_dim'], window=params['window_size'],
                              min_count=params['min_frequency'], workers=params['workers'], sg=params['use_skip_gram'],
                              hs=params['use_hierarchical_softmax'], negative=params['negative_size'],
                              iter=params['pre_proc_epochs'])
    return model


if __name__ == '__main__':
    pre_processing(config.get_params())
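Both helpers above use pre-4.0 gensim keyword names. In gensim >= 4.0, size became vector_size and iter became epochs, so a minimal modern equivalent looks like the sketch below (toy in-memory corpus, illustrative parameters):

from gensim.models import Word2Vec

sentences = [["hello", "world"], ["hello", "gensim"]]
model = Word2Vec(sentences, vector_size=100, window=5,
                 min_count=1, sg=1, epochs=5)
print(model.wv["hello"].shape)   # (100,)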
Code example #15
def main(argv):
    # set fixed random seed, load config files
    tf.random.set_seed(RANDOM_SEED)

    # using mix precision or not
    if MIXPRECISION:
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)

    # get params for model
    train_iter, input_size, num_cls, lrs_schedule_params, loss_params, parser_params, model_params = get_params(
        FLAGS.name)

    # -----------------------------------------------------------------
    # set up Grappler for graph optimization
    # Ref: https://www.tensorflow.org/guide/graph_optimization
    @contextlib.contextmanager
    def options(opts):
        old_opts = tf.config.optimizer.get_experimental_options()
        tf.config.optimizer.set_experimental_options(opts)
        try:
            yield
        finally:
            tf.config.optimizer.set_experimental_options(old_opts)

    # -----------------------------------------------------------------
    # Creating the instance of the model specified.
    logging.info("Creating the model instance of YOLACT")
    model = Yolact(**model_params)

    # add weight decay (bind `layer` as a default argument so each lambda
    # regularizes its own weights rather than the last layer in the loop)
    for layer in model.layers:
        if isinstance(layer, (tf.keras.layers.Conv2D, tf.keras.layers.Dense)):
            layer.add_loss(lambda layer=layer: tf.keras.regularizers.l2(
                FLAGS.weight_decay)(layer.kernel))
        if hasattr(layer, 'bias_regularizer') and layer.use_bias:
            layer.add_loss(lambda layer=layer: tf.keras.regularizers.l2(
                FLAGS.weight_decay)(layer.bias))

    # -----------------------------------------------------------------
    # Creating dataloaders for training and validation
    logging.info("Creating the dataloader from: %s..." % FLAGS.tfrecord_dir)
    dataset = ObjectDetectionDataset(dataset_name=FLAGS.name,
                                     tfrecord_dir=os.path.join(
                                         FLAGS.tfrecord_dir, FLAGS.name),
                                     anchor_instance=model.anchor_instance,
                                     **parser_params)
    train_dataset = dataset.get_dataloader(subset='train',
                                           batch_size=FLAGS.batch_size)
    valid_dataset = dataset.get_dataloader(subset='val', batch_size=1)
    # count number of valid data for progress bar
    # Todo any better way to do it?
    num_val = 0
    for _ in valid_dataset:
        num_val += 1
    # -----------------------------------------------------------------
    # Choose the optimizer, loss function, metrics, and learning rate schedule
    lr_schedule = learning_rate_schedule.Yolact_LearningRateSchedule(
        **lrs_schedule_params)
    logging.info("Initiate the Optimizer and Loss function...")
    optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule,
                                        momentum=FLAGS.momentum)
    criterion = loss_yolact.YOLACTLoss(**loss_params)
    train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
    loc = tf.keras.metrics.Mean('loc_loss', dtype=tf.float32)
    conf = tf.keras.metrics.Mean('conf_loss', dtype=tf.float32)
    mask = tf.keras.metrics.Mean('mask_loss', dtype=tf.float32)
    seg = tf.keras.metrics.Mean('seg_loss', dtype=tf.float32)
    # -----------------------------------------------------------------

    # Setup the TensorBoard for better visualization
    # Ref: https://www.tensorflow.org/tensorboard/get_started
    logging.info("Setup the TensorBoard...")
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir = './logs/gradient_tape/' + current_time + '/train'
    test_log_dir = './logs/gradient_tape/' + current_time + '/test'
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    test_summary_writer = tf.summary.create_file_writer(test_log_dir)

    # -----------------------------------------------------------------
    # Start the Training and Validation Process
    logging.info("Start the training process...")

    # setup checkpoints manager
    checkpoint = tf.train.Checkpoint(step=tf.Variable(1),
                                     optimizer=optimizer,
                                     model=model)
    manager = tf.train.CheckpointManager(checkpoint,
                                         directory="./checkpoints",
                                         max_to_keep=5)
    # restore from latest checkpoint and iteration
    status = checkpoint.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        logging.info("Restored from {}".format(manager.latest_checkpoint))
    else:
        logging.info("Initializing from scratch.")

    best_masks_map = 0.
    iterations = checkpoint.step.numpy()

    for image, labels in train_dataset:
        # check iteration and change the learning rate
        if iterations > train_iter:
            break

        checkpoint.step.assign_add(1)
        iterations += 1
        with options({
                'constant_folding': True,
                'layout_optimizer': True,
                'loop_optimization': True,
                'arithmetic_optimization': True,
                'remapping': True
        }):
            loc_loss, conf_loss, mask_loss, seg_loss = train_step(
                model, criterion, train_loss, optimizer, image, labels,
                num_cls)
        loc.update_state(loc_loss)
        conf.update_state(conf_loss)
        mask.update_state(mask_loss)
        seg.update_state(seg_loss)
        with train_summary_writer.as_default():
            tf.summary.scalar('Total loss',
                              train_loss.result(),
                              step=iterations)
            tf.summary.scalar('Loc loss', loc.result(), step=iterations)
            tf.summary.scalar('Conf loss', conf.result(), step=iterations)
            tf.summary.scalar('Mask loss', mask.result(), step=iterations)
            tf.summary.scalar('Seg loss', seg.result(), step=iterations)

        if iterations and iterations % FLAGS.print_interval == 0:
            tf.print(
                "Iteration {}, LR: {}, Total Loss: {}, B: {},  C: {}, M: {}, S:{} "
                .format(iterations,
                        optimizer._decayed_lr(var_dtype=tf.float32),
                        train_loss.result(), loc.result(), conf.result(),
                        mask.result(), seg.result()))

        if iterations and iterations % FLAGS.save_interval == 0:
            # save checkpoint
            save_path = manager.save()
            logging.info("Saved checkpoint for step {}: {}".format(
                int(checkpoint.step), save_path))

            # validation and print mAP table
            all_map = evaluate(model, valid_dataset, num_val, num_cls)
            box_map, mask_map = all_map['box']['all'], all_map['mask']['all']
            tf.print(f"box mAP:{box_map}, mask mAP:{mask_map}")

            with test_summary_writer.as_default():
                tf.summary.scalar('Box mAP', box_map, step=iterations)
                tf.summary.scalar('Mask mAP', mask_map, step=iterations)

            # Saving the weights:
            if mask_map > best_masks_map:
                best_masks_map = mask_map
                model.save_weights(
                    f'{FLAGS.weights}/weights_{FLAGS.name}_{str(best_masks_map)}.h5'
                )

            # reset the metrics
            train_loss.reset_states()
            loc.reset_states()
            conf.reset_states()
            mask.reset_states()
            seg.reset_states()
Code example #16
    def computeCost(self, rootDic, noAGV, k, u, utilObj, prevTask):
        currTask = u
        optim = torch.optim.Adam(self.Model.parameters())

        # the observation for this iteration is read from the planner
        obs = utilObj.readObs(prevTask)
        # the last task is decided by the planner
        estimatedCost = self.doTrain(k, obs, currTask, prevTask, utilObj)
        print('estimated cost: ' + str(estimatedCost))

        #utilObj.storeObs(k, currTask)

        params = config.get_params()
        if (k >= 5):
            if k % params["timestep"] == 0:
                print("Calculating Loss")
                allsTensor = torch.cat(self.allS, dim=1)
                allobsTensor = torch.cat(self.allObs, dim=1)
                L = self.loss_fn(allsTensor, allobsTensor)
                print('Loss =' + str(L))
                # sys.exit()

                L.backward()
                self.allObs.clear()
                self.allS.clear()

                optim.step()
                optim.zero_grad()

            #end if

        np.savetxt('estimate.xls', self.saveEstimate, delimiter=',')
        np.savetxt('observations.xls', self.saveObs, delimiter=',')

        #self.stateDict = collections.OrderedDict(sorted(self.stateDict.items()))

        return estimatedCost
Code example #17
import os

import tensorflow as tf

from config import PASCAL_CLASSES, COLORS, get_params, ROOT_DIR
from data.coco_dataset import ObjectDetectionDataset
from utils import learning_rate_schedule
from utils.utils import postprocess, denormalize_image
from yolact import Yolact

# Todo Add your custom dataset
tf.random.set_seed(1234)
NAME_OF_DATASET = "pascal"
CLASS_NAMES = PASCAL_CLASSES

# -----------------------------------------------------------------------------------------------
# create model and dataloader
train_iter, input_size, num_cls, lrs_schedule_params, loss_params, parser_params, model_params = get_params(
    NAME_OF_DATASET)
model = Yolact(**model_params)
dataset = ObjectDetectionDataset(dataset_name=NAME_OF_DATASET,
                                 tfrecord_dir=os.path.join(
                                     ROOT_DIR, "data", NAME_OF_DATASET),
                                 anchor_instance=model.anchor_instance,
                                 **parser_params)
train_dataset = dataset.get_dataloader(subset='train', batch_size=1)
valid_dataset = dataset.get_dataloader(subset='val', batch_size=1)
# -----------------------------------------------------------------------------------------------
# Restore CheckPoints
# Choose the optimizer, loss function, metrics, and learning rate schedule
lr_schedule = learning_rate_schedule.Yolact_LearningRateSchedule(
    **lrs_schedule_params)
optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)
Code example #18
    def scheduling(self):
        params = config.get_params()
        state = 'INITIALISE'
        while self.itr <= params["repNo"]:
            if state == 'INITIALISE':
                utilObj = utility(self.dirName, self.ownNo)
                if (self.ownNo == 1):
                    lastTask = 2
                    utilObj.storeCurrLoc(self.ownNo, lastTask)
                    #utilObj.storeObs(self.itr , lastTask)
                    utilObj.storeObs(k=1, currTask=lastTask, estCost=0)
                elif (self.ownNo == 2):
                    lastTask = 8
                    utilObj.storeCurrLoc(self.ownNo, lastTask)
                    utilObj.storeObs(k=1, currTask=lastTask, estCost=0)
                    #utilObj.storeCurrLoc(self.ownNo, 8)
                elif (self.ownNo == 3):
                    lastTask = 12
                    utilObj.storeCurrLoc(self.ownNo, lastTask)
                    utilObj.storeObs(k=1, currTask=lastTask, estCost=0)
                    #utilObj.storeCurrLoc(self.ownNo, 12)
                elif (self.ownNo == 4):
                    lastTask = 7
                    utilObj.storeCurrLoc(self.ownNo, lastTask)
                    utilObj.storeObs(k=1, currTask=lastTask, estCost=0)
                    #utilObj.storeCurrLoc(self.ownNo, 7)
                #end if

                state = 'START'
            if state == 'START':
                self.itr += 1
                currLoc = utilObj.getCurrLoc(self.ownNo)
                print("Itr: ", self.itr)
                print("currLoc at AGV level: ", currLoc)
                stateDict, lenTaskSeq = self.planning(currLoc, utilObj, params)
                stateDict = collections.OrderedDict(sorted(stateDict.items()))
                #print(stateDict)
                df = pd.DataFrame({
                    key: pd.Series(value)
                    for key, value in stateDict.items()
                })
                df.to_csv('estState.xlsx', encoding='utf-8', index=False)
                self.completedTask_lst[self.itr] = self.taskSequence
                state = "COMPLETE"
            if state == 'COMPLETE':
                lastTask = self.endTask

                utilObj.storeCurrLoc(self.ownNo, lastTask)
                #utilObj.storeObs(self.lenT, self.endTask)
                outtxt1 = 'AGV: ' + str(self.ownNo) + ' ' + 'ITR: ' + str(
                    self.itr) + ' ' + 'TASKS: ' + str(
                        self.completedTask_lst) + '\n'
                self.fid1 = open(self.f1, 'a')
                self.fid1.write(outtxt1)
                self.fid1.close()
                state = 'START'
Code example #19
File: run.py, Project: yucoian/subMrc
# parse arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--mode', dest='mode', type=int, default=2,
        help='run mode - (0-train+test, 1-train only, 2-test only, 3-val only)')
parser.add_argument('--nlayers', dest='nlayers', type=int, default=3,
        help='Number of reader layers')
parser.add_argument('--dataset', dest='dataset', type=str, default='cmrc',
        help='Dataset - cmrc')
parser.add_argument('--seed', dest='seed', type=int, default=36,
        help='Seed for different experiments with same settings')
parser.add_argument('--gating_fn', dest='gating_fn', type=str, default='T.mul',
        help='Gating function (T.mul || Tsum || Tconcat)')
args = parser.parse_args()
cmd = vars(args)
params = get_params(cmd['dataset'])
params.update(cmd)

np.random.seed(params['seed'])
random.seed(params['seed'])

# save directory
w2v_filename = params['word2vec'].split('/')[-1].split('.')[0] if params['word2vec'] else 'None'
save_path = ('experiments/mul_bpe_1k cmrc_nhid128_nlayers3_dropout0.5_wiki_chardim100_train1_seed36_use-feat1_gfT.mul')
if not os.path.exists(save_path): os.makedirs(save_path)

# train
if params['mode']<2:
    train.main(save_path, params)

# test
Code example #20
File: preprocess_data.py, Project: Seleucia/CNNRNet
def depth_meansubtract(params):
    for dir in params["dataset"]:
        if dir == -1:
            continue
        normalizer = 52492
        sbt = params["depth_mean"]
        im_type = 'depth'
        im_type_to = 'mean_depth'
        new_dir = dir[0] + im_type_to + "/"
        if os.path.exists(new_dir):
            rmtree(new_dir)
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
            full_path = dir[0] + '/' + im_type + '/*.png'
            lst = glob.glob(full_path)
            for f in lst:
                img = Image.open(f)
                arr1 = np.array(img, theano.config.floatX)
                arr2 = np.zeros_like(arr1)
                arr2[arr1.nonzero()] = sbt
                arr1 = (arr1 - arr2) / normalizer
                f = new_dir + os.path.basename(f).replace(".png", "")
                np.save(f, arr1)
            print("data set converted %s"%(dir[0]))
        else:
            print("data set has already proccessed %s"%(dir[0]))
    print "Depth data proccessing completed"

params=config.get_params()
depth_meansubtract(params)
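The per-image transform above subtracts the dataset mean only from valid (non-zero) depth pixels and then rescales. A standalone toy version (depth_mean is a made-up stand-in for params["depth_mean"]; the normalizer matches the constant in the code):

import numpy as np

depth = np.array([[0, 1200, 1300], [0, 0, 1250]], dtype=np.float32)
depth_mean, normalizer = 1000.0, 52492.0
shift = np.zeros_like(depth)
shift[depth.nonzero()] = depth_mean     # leave missing (zero) pixels untouched
normalized = (depth - shift) / normalizer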
Code example #21
File: beam_search.py, Project: war11393/seq2seq
    def result_proc(text):
        """
        Final post-processing of a prediction.
        :param text: a single decoded prediction
        :return: the cleaned prediction text
        """
        # text = text.lstrip(' ,!。')
        text = text.replace(' ', '')
        text = text.strip()
        if '<end>' in text:
            text = text[:text.index('<end>')]
        return text

    test_csv = pd.read_csv(config.test_set, encoding="UTF-8")
    # attach the predictions
    test_csv['Prediction'] = results
    # keep only the QID and Prediction columns
    test_df = test_csv[['QID', 'Prediction']]
    # clean up each prediction
    test_df['Prediction'] = test_df['Prediction'].apply(result_proc)
    # save the results
    test_df.to_csv(config.inference_result_path, index=None, sep=',')
    print('Saved results to {}'.format(config.inference_result_path))


if __name__ == '__main__':
    params = config.get_params()
    params['batch_size'] = 8
    params['beam_size'] = 8
    beam_search(params)
Code example #22
from config import get_params
from yolact import Yolact

name = "coco"
train_iter, input_size, num_cls, lrs_schedule_params, loss_params, parser_params, model_params = get_params(
    name)
model = Yolact(**model_params)
model.build(input_shape=(2, 550, 550, 3))
model.summary()
Code example #23
import torch
from torch import nn
from config import get_params

params = get_params()


class MultiFilterBlock(nn.Module):
    def __init__(self, in_channels, out_channels, pool_size, n_parallels=4):
        super(MultiFilterBlock, self).__init__()
        self.pooling = nn.Sequential(nn.AvgPool2d(kernel_size=(pool_size, 1)),
                                     nn.BatchNorm2d(in_channels))
        convs = []
        kernel_width = 4  # renamed from `filter` to avoid shadowing the builtin
        for i in range(n_parallels):
            convs.append(
                nn.Conv2d(in_channels=in_channels,
                          out_channels=26,
                          kernel_size=(1, kernel_width),
                          padding=kernel_width // 2))
            kernel_width += 4

        self.conv_modules = nn.ModuleList(convs)
        self.bottleneck = nn.Conv2d(in_channels=26,
                                    out_channels=out_channels,
                                    kernel_size=1)
        self.elu = nn.ELU()

    def forward(self, x):
        output = self.pooling(x)
        parallel_output = []
Code example #24
    steps_per_val = len(
        data_dict['x_test']) // (MP['batch_size'] * MP['n_gpus'])

    #########################
    # Generate the generators
    #########################
    train_gen = ImageDataGenerator(**IMP)
    test_gen = ImageDataGenerator()

    ######################################################
    # Define Hyperopt optimization and run training trials
    ######################################################
    trials = Trials()
    algo = partial(tpe.suggest, n_startup_jobs=MP['n_rand_hp_iters'])
    argmin = fmin(run_deeplab_trial,
                  space=get_params(MP),
                  algo=algo,
                  max_evals=MP['n_total_hp_iters'],
                  trials=trials)

    ###############################
    # End of training cleanup
    ###############################
    end_time = dt.now()
    print_end_details(start_time, end_time)
    print("Evalutation of best performing model:")
    print(trials.best_trial['result']['loss'])

    # Dump trials object for safe-keeping
    with open(op.join(ckpt_dir, 'trials_{}.pkl'.format(start_time)),
              "wb") as pkl_file: