Example #1
    def __init_model(self, entry):
        if entry == "train":
            self.train_manager = DataManager(batch_size=self.batch_size,
                                             tags=self.tags)
            self.total_size = len(self.train_manager.batch_data)
            data = {
                "batch_size": self.train_manager.batch_size,
                "input_size": self.train_manager.input_size,
                "vocab": self.train_manager.vocab,
                "tag_map": self.train_manager.tag_map,
            }
            self.save_params(data)
            dev_manager = DataManager(batch_size=30, data_type="dev")
            self.dev_batch = dev_manager.iteration()

            self.model = BiLSTMCRF(
                tag_map=self.train_manager.tag_map,
                batch_size=self.batch_size,
                vocab_size=len(self.train_manager.vocab),
                dropout=self.dropout,
                embedding_dim=self.embedding_size,
                hidden_dim=self.hidden_size,
            )
            self.restore_model()
        elif entry == "predict":
            data_map = self.load_params()
            input_size = data_map.get("input_size")
            self.tag_map = data_map.get("tag_map")
            self.vocab = data_map.get("vocab")

            self.model = BiLSTMCRF(tag_map=self.tag_map,
                                   vocab_size=input_size,
                                   embedding_dim=self.embedding_size,
                                   hidden_dim=self.hidden_size)
            self.restore_model()
Example #2
    def __init__(self,
                 path_train,
                 definition,
                 path_test=None,
                 c_model_name=Classifier.cnn,
                 cv_folds=5,
                 num_epochs=100,
                 batch_size=8,
                 learning_rate=0.0003):
        """
        Initializer for CNNClassification class.

        :param path_train: path to the h5 file for the training dataset
        (simulated data)
        :param definition: Type of SSW definition to use. example: "CP07",
        "U65"
        :param path_test: path to the h5 file for the test dataset (real data)
        :param c_model_name: CNN classifier name which is going to be used
        example: "cnn", "cnn_max_pool"
        :param cv_folds: number of folds for the cross-validation which is
        used to evaluate the performance on the training dataset.
        :param num_epochs: Number of epochs to train the model
        :param batch_size: Batch size for Adam optimizer
        :param learning_rate: Learning rate for Adam optimizer
        """

        # Device configuration
        device = torch.device('cuda:' + os.getenv("CUDA_VISIBLE_DEVICES")
                              if torch.cuda.is_available() else 'cpu')

        SetSeed().set_seed()

        self.data_manager_train = DataManager(path_train)
        if path_test:
            self.data_manager_test = DataManager(path_test)

        self.definition = definition

        self.cv_folds = cv_folds

        self.metric_txt = ["F1", "ROCAUC", "Accuracy"]
        self.metrics = [f1_score, roc_auc_score, accuracy_score]
        self.model_class_name = c_model_name

        # Number of channels in the CNN - number of features to use
        num_ts = 3

        self.classifier = NeuralNetClassifier(
            cnn_model.get_cnn_classes()[c_model_name](num_ts),
            criterion=nn.CrossEntropyLoss,
            max_epochs=num_epochs,
            lr=learning_rate,
            batch_size=batch_size,
            device=device,
            optimizer=torch.optim.Adam,
        )
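
For orientation, here is a minimal usage sketch of the initializer in Example #2. The class name CNNClassification and the constructor arguments come straight from the docstring and signature above; the h5 file paths are placeholders, and nothing beyond what the constructor itself sets up is assumed.

# Hypothetical paths; point these at real h5 files for the simulated and real datasets.
clf = CNNClassification(path_train="data/train_simulated.h5",
                        definition="CP07",
                        path_test="data/test_real.h5",
                        c_model_name=Classifier.cnn,
                        cv_folds=5,
                        num_epochs=100,
                        batch_size=8,
                        learning_rate=0.0003)
# The underlying skorch NeuralNetClassifier built in __init__ is stored in
# clf.classifier and exposes the usual scikit-learn style fit/predict interface.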
Example #3
    def main_model(self, entry):
        """
        Model Initialization
        """
        # The Training Process
        if entry == "train":
            # Training Process: read Training Data from DataManager
            self.train_manager = DataManager(batch_size=self.batch_size,
                                             data_type='train',
                                             tags=self.tags)
            self.total_size = len(self.train_manager.batch_data)

            # Read the corresponding character index (vocab) and other hyper-parameters
            data = {
                "batch_size": self.train_manager.batch_size,
                "input_size": self.train_manager.input_size,
                "vocab": self.train_manager.vocab,
                "tag_map": self.train_manager.tag_map,
            }

            save_params(data=data, path=self.model_path)

            # Build BiLSTM-CRF Model
            self.model = BiLSTMCRF(tag_map=self.train_manager.tag_map,
                                   batch_size=self.batch_size,
                                   vocab_size=len(self.train_manager.vocab),
                                   dropout=self.dropout,
                                   embedding_dim=self.embedding_size,
                                   hidden_dim=self.hidden_size,
                                   max_length=self.max_length)

            # Evaluation Process: read Dev Data from DataManager
            self.dev_size = DataManager(batch_size=1,
                                        data_type="dev",
                                        tags=self.tags).load_data()
            self.dev_manager = DataManager(batch_size=int(self.dev_size),
                                           data_type="dev",
                                           tags=self.tags)
            self.dev_batch = self.dev_manager.iteration()

            # Restore model if it exists
            self.restore_model()

        # The Testing & Inference Process
        elif entry == "predict":
            data_map = load_params(path=self.model_path)
            input_size = data_map.get("input_size")
            self.tag_map = data_map.get("tag_map")
            self.vocab = data_map.get("vocab")
            self.model = BiLSTMCRF(tag_map=self.tag_map,
                                   vocab_size=input_size,
                                   dropout=0.0,
                                   embedding_dim=self.embedding_size,
                                   hidden_dim=self.hidden_size,
                                   max_length=self.max_length)

            self.restore_model()
Example #4
    def main_model(self, entry):
        # The Training Process
        if entry == "train":
            # Training Process: read Training Data from DataManager
            self.train_manager = DataManager(batch_size=self.batch_size, data_type='train', tags=self.tags)
            self.total_size = len(self.train_manager.batch_data)

            # Read the corresponding character index (vocab) and other hyper-parameters
            saved_data = {
                "batch_size": self.train_manager.batch_size,
                "input_size": self.train_manager.input_size,
                "char_vocab": self.train_manager.char_vocab,
                "tag_map": self.train_manager.tag_map,
            }
            save_params(data=saved_data, path=self.model_path)

            # Evaluation Process: read Dev Data from DataManager
            self.dev_size = DataManager(batch_size=1, data_type="dev", tags=self.tags).load_char_data()
            self.dev_manager = DataManager(batch_size=int(self.dev_size), data_type="dev")
            self.dev_batch = self.dev_manager.iteration()

            # Build BiLSTM-CRF Model
            self.model = BiLSTMCRF(
                tag_map=self.train_manager.tag_map,
                batch_size=self.batch_size,
                vocab_size=len(self.train_manager.char_vocab),
                dropout=self.dropout,
                word_num=self.word_num,
                word_dim=self.word_dim,
                char_num=self.char_num,
                char_dim=self.char_dim,
                hidden_dim=self.hidden_size,
            )

            # Restore model if it exists
            self.restore_model()

        # The Inference Process
        elif entry == "predict":
            data = load_params(path=self.model_path)
            input_size = data.get("input_size")
            self.tag_map = data.get("tag_map")
            self.vocab = data.get("char_vocab")
            self.model = BiLSTMCRF(
                tag_map=self.tag_map,
                vocab_size=input_size,
                dropout=1.0,
                word_num=self.word_num,
                word_dim=self.word_dim,
                char_num=self.char_num,
                char_dim=self.char_dim,
                hidden_dim=self.hidden_size,
            )
            self.restore_model()
Example #5
    def __init__(self):
        data_manager = DataManager(inputArguments())
        data_manager_2 = DataManager(inputArguments_2())

        # type: <class 'dataset.TransformableFullMetadataSubset'>, len: 42236
        train_activeset = data_manager.get_datasets()['train']
        train_new_dataset = data_manager_2.get_datasets()['train']
        test_activeset = data_manager.get_datasets()['validation']
        test_new_dataset = data_manager_2.get_datasets()['validation']

        train_set = []
        for indx in range(len(train_activeset)):
            train_set.append(train_activeset[indx])

        known_indexes = []
        with open('kn_indici.txt', 'r') as file:
            for line in file:
                known_indexes.append(int(line.strip('\n')))

        known_set = []
        for indx in known_indexes:
            known_set.append(train_new_dataset[indx])

        test_indexes = []
        with open('test_indici_newdataset.txt', 'r') as file:
            for line in file:
                test_indexes.append(int(line.strip('\n')))

        test_set = []
        for indx in test_indexes:
            test_set.append(test_new_dataset[indx])

        grad_test_set = []  # only used to test Grad-CAM
        for indx in range(len(test_activeset)):
            test_set.append(test_activeset[indx])
            # only used to test Grad-CAM
            grad_test_set.append(test_activeset[indx])

        expl_loader = torch.utils.data.DataLoader(grad_test_set,
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=2,
                                                  pin_memory=True)

        test_loader = torch.utils.data.DataLoader(test_set,
                                                  batch_size=32,
                                                  shuffle=False,
                                                  num_workers=2,
                                                  pin_memory=True)

        self.tr = train_set
        self.ts = test_loader
        self.kn = known_set
        self.exp = expl_loader
Example #6
    def init_datasets(self):
        self.Positive_data_list, self.Negative_data_list = self.listData1(self.__Param["data_dir"])
        if self.__Param["mode"] == "training":
            self.DataManager_train_Positive = DataManager(self.Positive_data_list, self.__Param)
            self.DataManager_train_Negative = DataManager(self.Negative_data_list, self.__Param)
        elif self.__Param["mode"] == "testing":
            self.DataManager_test_Positive = DataManager(self.Positive_data_list, self.__Param, shuffle=False)
            self.DataManager_test_Negative = DataManager(self.Negative_data_list, self.__Param, shuffle=False)
        elif self.__Param["mode"] == "savePb":
            pass
        else:
            raise Exception('got an unexpected mode')
Example #7
def update_charts(by):
    global chart_natn, chart_club, scatter
    global ranking_histo
    if not (np.issubdtype(data[by].dtype, np.integer)
            or np.issubdtype(data[by].dtype, np.floating)):
        return no_update
    else:
        chart_natn, chart_club, scatter = DataManager().plot_altair(data,
                                                                    by=by)
        ranking_histo = DataManager().plot_histo(data, by=by)

        return (chart_natn.to_html(), chart_club.to_html(), scatter.to_html(),
                ranking_histo.to_html())
Example #8
    def __init_model(self, entry):
        # Parameter preparation for model training
        if entry == "train":
            # Create the manager object for the training dataset
            print(self.tags)
            self.train_manager = DataManager(batch_size=self.batch_size, tags=self.tags)
            print(self.train_manager.batch_data)
            print(len(self.train_manager.batch_data))
            self.total_size = len(self.train_manager.batch_data)
            # print(self.train_manager.batch_data)
            data = {
                "batch_size": self.train_manager.batch_size,
                "input_size": self.train_manager.input_size,
                "vocab": self.train_manager.vocab,
                "tag_map": self.train_manager.tag_map,
            }
            # Save the parameters
            self.save_params(data)
            # Prepare the dev (validation) dataset
            # Create the manager object for the dev dataset
            dev_manager = DataManager(batch_size=30, data_type="dev")
            # The data manager's iterator feeds dev batches into dev_batch, which is used by the loss computation below
            self.dev_batch = dev_manager.iteration()

            # The model body uses a BiLSTM for semantic encoding, with a CRF layer to constrain the tag sequence
            self.model = BiLSTMCRF(
                tag_map=self.train_manager.tag_map,
                batch_size=self.batch_size,
                vocab_size=len(self.train_manager.vocab),
                dropout=self.dropout,
                embedding_dim=self.embedding_size,
                hidden_dim=self.hidden_size,
            )
            # Load/restore saved model parameters if a checkpoint exists
            self.restore_model()
        # Parameter preparation for prediction
        elif entry == "predict":
            data_map = self.load_params()
            input_size = data_map.get("input_size")
            self.tag_map = data_map.get("tag_map")
            self.vocab = data_map.get("vocab")
            # Create the model object here
            self.model = BiLSTMCRF(
                tag_map=self.tag_map,
                vocab_size=input_size,
                embedding_dim=self.embedding_size,
                hidden_dim=self.hidden_size
            )
            self.restore_model()
Example #9
    def init_datasets(self):
        if self.__Param["mode"] != "savePb":
            self.image_list_train, self.mask_list_train = self.listData_train(
                self.__Param["data_dir"])
            self.image_list_valid, self.mask_list_valid = self.listData_val(
                self.__Param["data_dir"])
            # self.image_list_test, self.mask_list_test = self.listData_test(self.__Param["data_dir"])

            self.DataManager_train = DataManager(self.image_list_train,
                                                 self.mask_list_train,
                                                 self.__Param)
            self.DataManager_valid = DataManager(self.image_list_valid,
                                                 self.mask_list_valid,
                                                 self.__Param,
                                                 shuffle=False)
Example #10
 def __init__(self):
     self.env_name = 'gaf-environment-v1.0'
     self.reward_mult = 100000
     self.cash = 100000
     self.total_loss = 0.0
     self.current_action = None
     self.previous_action = None
     self.clock = Clock()
     self.dm = DataManager(self.clock)
     self.pm = PositionManager(self.clock, self.dm, self.cash, 30)
     self.benchmark = PositionManager(self.clock, self.dm, self.cash, 1)
     self.symbols = self.dm.get_symbols()
     self.action_space = gym.spaces.Discrete(3)
     self.observation_space = gym.spaces.Box(low=-1,
                                             high=1,
                                             shape=(4, 30, 180))
     self.symbols = None
     self.final_cash_value = []
     #self.print_intro()
     self.avg_reward = 0
     self.episodes_ran = 0
     self.perm_symbols = [
         self.dm.current_symbol,
     ]
     self.returns = pd.DataFrame()
Example #11
def main():
    parser = ArgumentParser(
        'Preprocess robot data for DeepVO. This process is destructive.')
    parser.add_argument(
        '-d',
        '--data',
        type=str,
        required=True,
        help='Path to dataset (a folder with "images" and "poses" subfolders.)'
    )
    parser.add_argument('-f',
                        '--to-float',
                        required=False,
                        default=False,
                        action='store_true',
                        help='Convert images array to float')
    parser.add_argument('-m',
                        '--mean-normalize',
                        required=False,
                        default=False,
                        action='store_true',
                        help='Subtract rgb mean from images')
    parser.add_argument('-s',
                        '--show',
                        required=False,
                        default=False,
                        action='store_true',
                        help='Show the images')
    parser.add_argument('-p',
                        '--pose',
                        required=False,
                        default=False,
                        action='store_true',
                        help='Add pi to poses (for range 0-2pi)')
    parser.add_argument('-sp',
                        '--subpi',
                        required=False,
                        default=False,
                        action='store_true',
                        help='Subtract pi from poses (for range -pi to +pi)')
    args = parser.parse_args()

    data_manager = DataManager(args.data,
                               dtype=np.float32,
                               batch_size=1,
                               sequence_length=1)
    if args.to_float:
        to_float(data_manager)

    if args.mean_normalize:
        mean_normalize(data_manager)

    if args.show:
        show_imgs(data_manager)

    if args.pose:
        add_pi_to_poses(data_manager)

    if args.subpi:
        sub_pi_from_poses(data_manager)
Example #12
def main(argv):
  manager = DataManager()
  manager.load()

  sess = tf.Session()
  #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
  
  model = VAE(beta=flags.beta,
              learning_rate=flags.learning_rate,
              alpha = flags.alpha,
              kappa = flags.kappa,
              lagrange_mult_param = flags.lagrange_mult_param,
              use_geco = flags.use_geco)
  
  sess.run(tf.global_variables_initializer())

  saver = load_checkpoints(sess)

  if flags.training:
    # Train
    train(sess, model, manager, saver)
  else:
    reconstruct_check_images = manager.get_random_images(10)
    # Image reconstruction check
    reconstruct_check(sess, model, reconstruct_check_images)
    # Disentangle check
    disentangle_check(sess, model, manager)
Example #13
def cross_validation():

    for dataset in datasets:
        print('STARTING VALIDATION FOR %s' % dataset.name)

        hit_sum_test = []
        data_manager = DataManager(dataset)
        data_manager.load_data(categorical=False)

        for index in range(0, 20):
            x_TRAIN, y_TRAIN, x_VALIDATION, y_VALIDATION = data_manager.split_train_test_5fold(
                data_manager.X, data_manager.Y)
            hit_sum = []

            for fold in range(0, 5):
                rbf = ELM(number_of_hidden=1000)
                rbf.fit(x_TRAIN[fold], y_TRAIN[fold])
                hit_sum.append(
                    rbf.evaluate(x_VALIDATION[fold], y_VALIDATION[fold]))

            hit_sum_test.append(np.average(hit_sum))

        print('Accuracy: %.2f' % np.average(hit_sum_test))
        print('Min: %.2f' % np.min(hit_sum_test))
        print('Max: %.2f' % np.max(hit_sum_test))
        print('Std Dev: %.2f%%' % np.std(hit_sum_test))
Example #14
def build_data_manager(frame_folder, feature_folder, caption_file, key_frame_info_folder, save_path):
    data_manager = DataManager()
    data_manager.load_frame_path_info(frame_folder)
    data_manager.load_captions(caption_file)
    data_manager.load_key_frame_information(key_frame_info_folder)
    data_manager.load_features(feature_folder)
    data_manager.save(save_path)
Example #15
def test():
    '''
    Trains the model and prints its cross-validation score.
    '''
    matplotlib.rcParams['backend'] = 'Qt5Agg'
    matplotlib.get_backend()
    D = DataManager(data_name, data_dir)
    # Load the model
    mdl = model()

    Prepro = prepro.Preprocessor()
    #D.data['X_train'] = Prepro.removeOutliers(D.data['X_train'])
    #D.data['Y_train'] = Prepro.removeOutliers(D.data['Y_train'])
    X_train = D.data['X_train']
    Y_train = D.data['Y_train'].ravel()

    # test fitting the model
    mdl.fit(X_train, Y_train)

    # test the predictions
    Y_hat_train = mdl.predict(D.data['X_train'])
    Y_hat_valid = mdl.predict(D.data['X_valid'])
    Y_hat_test = mdl.predict(D.data['X_test'])

    metric_name, scoring_function = get_metric()
    scores = cross_val_score(mdl,
                             X_train,
                             Y_train,
                             cv=5,
                             scoring=make_scorer(scoring_function))
    print('\nCV score (95 perc. CI): %0.2f (+/- %0.2f)' %
          (scores.mean(), scores.std() * 2))
Example #16
def main():
    dm = DataManager()
    train_x, train_y, val_x, val_y, test_x, test_y = dm.get_inputs(dim=5)

    # create model
    n_input_dimension = train_x.shape[1]
    n_labels = train_y.shape[1]

    model = Sequential()
    model.add(Dense(50, input_shape=(n_input_dimension, )))
    model.add(Dense(50))
    model.add(Dense(n_labels, activation='softmax'))
    optimizer = Adam(lr=0.0001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # callbacks
    log_name = "log_" + datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    tb_path = "./logs/tensorboard/{}".format(log_name)
    tb = keras.callbacks.TensorBoard(log_dir=tb_path)
    es = EarlyStopping(monitor='val_loss', patience=10, verbose=1)

    # fit
    epochs = 10000
    batch_size = 100
    model.fit(train_x,
              train_y,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(val_x, val_y),
              callbacks=[tb, es])
Example #17
def main(args, config):

    ### Set config for either gpu (on cluster) or cpu (on local machine)
    if args.gpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = os.environ['SGE_GPU'].replace(
            "\n", ",")
    else:
        pass
    seed = args.seed

    ### Set random seed for numpy ops
    np.random.seed(seed)
    # set random seed for tensorflow graph
    # must set random seed before creating session.
    # setting seed comes from this GitHubGist https://gist.github.com/tnq177/ce34bcf6b20243b0b5b23c78833e7945
    tf.reset_default_graph()
    tf.set_random_seed(seed)

    print("Creating DataManager and loading training and testing dataset")
    manager = DataManager(args)

    sess = tf.Session(config=config)
    print("Creating model...")
    model = VAE(args, manager)
    print("Model created!")

    sess.run(tf.global_variables_initializer())
    # initialize/reset all running variables
    sess.run(model.running_vars_initializer)

    #create saver for checkpoints saving
    saver = utils.create_checkpoint_saver(sess, args, model)
    print("Will Start Training Now")
    model.train(sess, manager, saver, args)
Example #18
def main():
  if len(sys.argv) < 3:
    print('Error: missing path to images and/or confidence threshold\nErro: ausencia do diretorio das imagens e/ou do limiar de confiança')
    exit(0)
  CONFIDENCE_THRES = float(sys.argv[2]) 
  if CONFIDENCE_THRES > 100 or CONFIDENCE_THRES < 0:
    print('Error: Give a Confidence Score Threshold between [0,100]\nErro: Indique um Limiar de Confiança entre [0,100]')
    exit(0)  
     
  data = DataManager()
  net_graph = tf.Graph()
  class_net_graph = tf.Graph()
  with net_graph.as_default():
    net = Network(data.WIDTH, data.HEIGHT, data.CHANNELS)
    net.restore_model('model')
  with class_net_graph.as_default():
    class_net = ClassNetwork(data.WIDTH, data.HEIGHT, data.CHANNELS)
    class_net.restore_model('model')
    
  for rel_path, subdirs, files in os.walk(sys.argv[1]):
    for name in sorted(files):
      if name[-3:] != 'png':
        continue
      full_path = os.path.join(rel_path, name)
      print('processing ' + full_path)
      img = cv2.imread(full_path, cv2.IMREAD_COLOR)
      img = run(net, class_net, data, img, CONFIDENCE_THRES)
      cv2.imwrite(full_path, img)
Example #19
def load_input_data():
    dm = DataManager(INPUT_FILE)
    train_feature = dm.get_data_for_variable("wind_60")
    train_label = dm.get_data_for_variable("CP07")
    # Output should be iterable with features, label for train and test
    # shape (1372, 2, 210)
    return train_feature, train_label
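
A small sketch of consuming load_input_data, assuming scikit-learn is available; the 80/20 split is an illustrative choice and not something the original code specifies.

from sklearn.model_selection import train_test_split

# Per the comment above, the helper returns matching feature and label arrays.
train_feature, train_label = load_input_data()
x_train, x_test, y_train, y_test = train_test_split(train_feature, train_label,
                                                    test_size=0.2, random_state=0)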
Example #20
def validate():

    for dataset in datasets:
        print('STARTING decision-surface plot FOR %s' % dataset.name)

        data_manager = DataManager(dataset)
        data_manager.load_data(categorical=False)
        MSE = []
        RMSE = []

        for realization in range(0, 10):

            x_TRAIN, y_TRAIN, x_VALIDATION, y_VALIDATION = data_manager.split_train_test_5fold(
                data_manager.X, data_manager.Y)
            for i in range(0, 1):

                validation_perceptron = MultiLayerPerceptronRegressor(
                    activation='tanh', hidden_number=15, learning_rate=0.01)
                validation_perceptron.fit(x_TRAIN[i],
                                          y_TRAIN[i],
                                          dataset.value,
                                          epochs=300,
                                          verbose=True)
                mse, rmse = validation_perceptron.evaluate(
                    x_VALIDATION[i], y_VALIDATION[i])
                MSE.append(mse)
                RMSE.append(rmse)

        print('MSE: %.2f' % np.average(MSE))
        print('RMSE: %.2f' % np.average(RMSE))
        print('MSE Std Dev: %.2f%%' % np.std(MSE))
        print('RMSE Std Dev: %.2f%%' % np.std(RMSE))
Example #21
def run():

    for dataset in datasets:
        print('STARTING VALIDATION FOR %s' % dataset.name)

        hit_sum = []
        data_manager = DataManager(dataset)
        data_manager.load_data(categorical=False)

        for i in range(1, 2):
            x_TRAIN, y_TRAIN, x_VALIDATION, y_VALIDATION = data_manager.split_train_test_5fold(
                data_manager.X, data_manager.Y)

            for fold in range(0, 2):

                rbf = ELM(number_of_hidden=1000)
                rbf.fit(x_TRAIN[fold], y_TRAIN[fold])
                hit_sum.append(
                    rbf.evaluate(x_VALIDATION[fold], y_VALIDATION[fold]))

        rmse = np.power(hit_sum, 0.5)
        print('MSE: %.25f' % np.average(hit_sum))
        print('MSE Std: %.25f' % np.std(hit_sum))
        print('RMSE: %.25f' % np.average(rmse))
    print('RMSE Std: %.25f' % np.std(rmse))
Example #22
 def __init__(self):
     """
     Initialize all data managers.
     """
     self.data_manager_list = []
     for site_id in range(1, 11):
         self.data_manager_list.append(DataManager(site_id))
Example #23
def main(argv):
    manager = DataManager(FLAGS.data)
    manager.load()

    sess = tf.Session()

    normaliser = (FLAGS.latent_size / 10) * ((64 * 64) / manager.input_size)

    model = MODEL(latent_size=FLAGS.latent_size,
                  gamma=normaliser * FLAGS.gamma,
                  capacity_limit=FLAGS.capacity_limit,
                  capacity_change_duration=FLAGS.capacity_change_duration,
                  learning_rate=FLAGS.learning_rate,
                  n_channels=manager.n_channels)

    sess.run(tf.global_variables_initializer())

    saver = load_checkpoints(sess)

    if FLAGS.training:
        # Train
        train(sess, model, manager, saver)
    else:
        reconstruct_check_images = manager.get_random_images(10)
        # Image reconstruction check
        reconstruct_check(sess, model, reconstruct_check_images)
        # Disentangle check
        disentangle_check(sess, model, manager)
Example #24
def main(argv):
    np.random.seed(1)

    if not os.path.exists(flags.save_dir):
        os.mkdir(flags.save_dir)

    data_manager = DataManager()

    place_cells = PlaceCells()
    hd_cells = HDCells()

    data_manager.prepare(place_cells, hd_cells)

    model = Model(place_cell_size=place_cells.cell_size,
                  hd_cell_size=hd_cells.cell_size,
                  sequence_length=flags.sequence_length)

    trainer = Trainer(data_manager, model, flags)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # For Tensorboard log
    log_dir = flags.save_dir + "/log"
    summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

    # Load checkpoints
    saver, start_step = load_checkpoints(sess)

    # Train
    train(sess, trainer, saver, summary_writer, start_step)
Example #25
    def train(self):
        data_manager = DataManager()
        passage_vocab_size = len(data_manager.passage_vocab.values())
        question_vocab_size = len(data_manager.passage_vocab.values())
        data_loader = DataLoader(
            data_manager, batch_size=1, shuffle=True, num_workers=4
        )
        model = AmandaModel(
            passage_vocab_size=passage_vocab_size,
            question_vocab_size=question_vocab_size
        )
        optimizer = torch.optim.Adamax(model.parameters())
        crt = torch.nn.CrossEntropyLoss()
        for epoch in range(self.max_epoch):
            print("epoch {}".format(epoch))
            
            for index, batch in enumerate(data_loader):
                model.zero_grad()

                passage, question, spans = batch
                Pra = model(passage, question)
                loss = 0.
                for b_index in range(passage.shape[0]):
                    loss += crt(Pra[b_index], spans[b_index])
                print("loss ", loss)
                loss.backward()
                optimizer.step()
        pred = model(passage, question)
Example #26
def evaluate_map_data(map_resolution, environment):
    data_manager = DataManager(DEFAULT_MAP, "perfect")
    filter_types = ["normal_filter", "biased_normal_filter", "voxel_filter"]
    filter_translation_results = dict()
    filter_orientation_results = dict()
    filter_iter_results = dict()
    point_map = dict()
    for filter_name in filter_types:
        data_manager.load_data(filter_name)

        trans_mean = data_manager.data.post_translation_means()
        trans_std = data_manager.data.post_translation_std()
        filter_translation_results[filter_name] = (trans_mean, trans_std)

        orientation_mean = data_manager.data.post_rotation_means()
        orientation_std = data_manager.data.post_rotation_std()
        filter_orientation_results[filter_name] = (orientation_mean,
                                                   orientation_std)

        iter_mean = data_manager.data.iter_means()
        iter_std = data_manager.data.iter_std()
        filter_iter_results[filter_name] = (iter_mean, iter_std)

        point_map[filter_name] = data_manager.data.positions
        data_manager.readonly_close()

    result_plotter.plot_filter_statistics_translation(
        filter_translation_results, map_resolution)
    result_plotter.plot_filter_statistics_orientation(
        filter_orientation_results, map_resolution)
    result_plotter.plot_filter_statistics_iter(filter_iter_results,
                                               map_resolution)
    result_plotter.plot_points_on_map(environment, point_map)
Example #27
def run():
    """
    """
    logger = init()
    
    try:
        data = DataManager(path_to_raw)
        model = ModelManager(data, augment_data)

        logger.info(f"Creating new folder in {results_path} to save model object and results...")
        folder_name = time.strftime("%Y-%m-%d_%H-%M-%S_RUN")
        mkdir(results_path+folder_name)
        logger.info(f"{folder_name} created in {results_path}.")

        logger.info(f"Saving model object in the created folder...")
        with open(results_path + folder_name + "/" + model_name, "wb") as output:
            pickle.dump(model, output, pickle.HIGHEST_PROTOCOL)
        logger.info("Saving of model object completed.")

        if generate_results_report:
            report_file = open(results_path + folder_name + "/" + "Run_Report.txt", "w")
            report_file.write(write_report_from_run(model, augment_data))
            report_file.close()
        
        logger.info("Main program completed successfully.")
        return model

    except Exception as e:
        logger.error("An error occured, program will be interrupted. Details : " + str(e))
        raise e 
Example #28
def test():
    input_dir = '../../datasets/automl/'  # Change this to the directory containing AutoML datasets
    if not os.path.isdir(input_dir):
        raise ValueError(
            "input_dir not found. You can change this value in your ")
    small_datasets = ['jasmine', 'dexter', 'adult', 'cadata', 'arturo']
    filepath = './sample-haha'
    dataset_name = np.random.choice(small_datasets)
    set_type = np.random.choice(['train', 'test'])
    # set_type = 'test'
    D = DataManager(dataset_name,
                    input_dir,
                    replace_missing=False,
                    verbose=verbose)
    D_info = D.info
    print("dataset_name={}, set_type={}, sparse or dense: {}".format(
        dataset_name, set_type, D.info['format']))
    metadata, features, labels = _prepare_metadata_features_and_labels(
        D, set_type=set_type)
    convert_vectors_to_sequence_example(filepath,
                                        metadata,
                                        features,
                                        labels,
                                        D_info,
                                        max_num_examples=None,
                                        num_shards=1)
    print_first_sequence_example(filepath)
    pprint(D.info)
    print("Now you should see 2 or 3 new files in current directory. :)")
Example #29
def generate_map_data(grid_map, environment, map_name, dir_name, prefix,
                      sample_count, cloud_size, rangefinder_noise, points):

    data_manager = DataManager(map_name, dir_name, prefix)

    point_cloud_filters = [
        NormalFilter(number_of_bins=20),
        BiasedNormalFilter(number_of_bins=20),
        AdaptivelyVoxelFilter(2 * grid_map.size),
        RandomFilter()
    ]

    #points = [(1.5, 1.5), (3, 1.5), (4.5, 1.5),
    #          (1.5, 2.5), (3, 3), (5, 2.4),
    #          (3, 3), (5, 3.5),
    #          (3.5, 5),
    #          (1.5, 7), (3.5, 7),
    #          (2.5, 8), (4, 8), (5, 8.5),
    #          (6.5, 8.5), (8, 8)]

    #points = [(1.5, 1.5), (2, 1.5), (2.5, 1.5), (3, 1.5), (3.5, 1.5),
    #         (4, 1.5), (4.5, 1.5), (5, 1.5), (5.5, 1.5), (6, 1.5),
    #        (6.5, 1.5), (7, 1.5), (7.5, 1.5), (8, 1.5), (8.5, 1.5)]
    #points = [(5.5, 5.5)]

    for point in points:
        print("Process point: ", point)
        rangefinder = Rangefinder(cloud_size,
                                  range_variance=rangefinder_noise,
                                  angular_variance=rangefinder_noise)
        point_cloud = rangefinder.scan(environment, point)
        point_cloud.calc_normals()
        evaluate_filters(grid_map, point_cloud, point_cloud_filters,
                         data_manager, sample_count)
Example #30
 def setUp(self):
     db_file = "YourAppointment.db"
     if os.path.exists(db_file):
         os.remove(db_file)
     self.dm = DataManager()
     with redirect_stdout(StringIO()) as stdout:
         self.dm.db.create_table_customers()