def load_or_gen_list_dict(rec_f, rec):
    cls_idx_dict_f = os.path.splitext(rec_f)[0] + '.json'
    idx_cls_lst_f = os.path.splitext(rec_f)[0] + '-lst.json'
    if os.path.exists(cls_idx_dict_f) and os.path.exists(idx_cls_lst_f):
        # Cached: reload the label list and the class -> indices mapping,
        # restoring the int types lost in the JSON round trip.
        idx_cls_lst = load_json(idx_cls_lst_f)
        cls_idx_dict = load_json(cls_idx_dict_f)
        idx_cls_lst = [int(item) for item in idx_cls_lst]
        keys = list(cls_idx_dict.keys())
        for k in keys:
            cls_idx_dict[int(k)] = cls_idx_dict.pop(k)
        return idx_cls_lst, cls_idx_dict
    else:
        # First run: scan the record file, collect (index, label) pairs,
        # then build both structures and cache them as JSON.
        idx_cls_lst = []
        for idx in rec.idx.keys():
            record = rec.read_idx(idx)
            h, _ = recordio.unpack(record)
            idx_cls_lst.append([idx, int(h.label)])
        cls_idx_dict = {}
        for idx, y in idx_cls_lst:
            if y in cls_idx_dict:
                cls_idx_dict[y].append(idx)
            else:
                cls_idx_dict[y] = [idx]
        idx_cls_lst = [int(l) for _, l in idx_cls_lst]
        save_json(idx_cls_lst, idx_cls_lst_f)
        save_json(cls_idx_dict, cls_idx_dict_f)
        return idx_cls_lst, cls_idx_dict
def load_or_gen_dict(rec_f, rec):
    cls_idx_dict_f = os.path.splitext(rec_f)[0] + '.json'
    if os.path.exists(cls_idx_dict_f):
        cls_idx_dict = load_json(cls_idx_dict_f)
        keys = list(cls_idx_dict.keys())
        for k in keys:
            cls_idx_dict[int(k)] = cls_idx_dict.pop(k)
        return cls_idx_dict
    else:
        idx_cls_lst = []
        for idx in rec.idx.keys():
            record = rec.read_idx(idx)
            h, _ = recordio.unpack(record)
            idx_cls_lst.append([idx, h.label])
        cls_idx_dict = {}
        for idx, y in idx_cls_lst:
            y = int(y)
            if y in cls_idx_dict:
                cls_idx_dict[y].append(idx)
            else:
                cls_idx_dict[y] = [idx]
        save_json(cls_idx_dict, cls_idx_dict_f)
        return cls_idx_dict
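# The snippets above and below rely on `save_json`/`load_json` helpers that are not
# shown here. This is a minimal sketch of what they are assumed to do (serialize to /
# deserialize from a JSON file with the call order save_json(obj, path)); the real
# project helpers may differ, e.g. in encoding or indentation options.
import json


def save_json(obj, path):
    # Write `obj` to `path` as JSON.
    with open(path, 'w') as f:
        json.dump(obj, f)


def load_json(path):
    # Read a JSON object back from `path`.
    with open(path) as f:
        return json.load(f)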
def _save_metadata(self):
    logging.info(self.args)
    meta_data_dict = {
        "args": vars(self.args),
        "optimizer": self.lr_scheduler.optimizer.state_dict(),
        "scheduler": self.lr_scheduler.state_dict(),
        "model": "%s" % self.model,
    }
    save(meta_data_dict, self.files['metadata'])
    save_json(meta_data_dict, self.files['metadata'] + '.json')
    logging.info(meta_data_dict)
def gen_cls_idx_dict():
    logits = np.loadtxt(args.preds)
    if len(logits.shape) == 2:
        # A matrix of per-class logits: take the argmax per row.
        preds = np.argmax(logits, axis=1)
    else:
        # Already a vector of predicted labels.
        preds = logits
    cls_idx_dict = {}
    for i, y_hat in enumerate(preds):
        # Convert once up front so lookups and insertions use the same key type.
        y_hat = int(y_hat)
        if y_hat in cls_idx_dict:
            cls_idx_dict[y_hat].append(i)
        else:
            cls_idx_dict[y_hat] = [i]
    save_json(cls_idx_dict, args.out)
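# Hypothetical argument parsing for gen_cls_idx_dict above: the function reads a
# module-level `args`, which is assumed to provide `preds` (path to an np.savetxt file
# of logits or predicted labels) and `out` (output JSON path). The parser below is an
# illustration only; the real script may define it differently.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--preds', required=True,
                    help='text file of logits (N x C) or predicted labels (N,)')
parser.add_argument('--out', required=True,
                    help='output path for the class -> sample-indices JSON')
args = parser.parse_args()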
def load_or_gen_list(rec_f, rec):
    idx_cls_lst_f = os.path.splitext(rec_f)[0] + '-lst.json'
    if os.path.exists(idx_cls_lst_f):
        idx_cls_lst = load_json(idx_cls_lst_f)
        idx_cls_lst = [int(l) for l in idx_cls_lst]
        return idx_cls_lst
    else:
        idx_cls_lst = []
        for idx in rec.idx.keys():
            record = rec.read_idx(idx)
            h, _ = recordio.unpack(record)
            idx_cls_lst.append(int(h.label))
        save_json(idx_cls_lst, idx_cls_lst_f)
        return idx_cls_lst
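# Illustrative usage of the load_or_gen_* helpers above, assuming `recordio` refers to
# mxnet.recordio and the record file is an MXNet indexed .rec/.idx pair; the file
# paths are hypothetical.
from mxnet import recordio

rec_path = 'train.rec'  # hypothetical paths
idx_path = 'train.idx'
rec = recordio.MXIndexedRecordIO(idx_path, rec_path, 'r')

# Per-record label list, cached as train-lst.json after the first scan.
idx_cls_lst = load_or_gen_list(rec_path, rec)
# Class -> record-index mapping, cached as train.json.
cls_idx_dict = load_or_gen_dict(rec_path, rec)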
def train(
    train_data,
    val_data,
    test_data,
    model: keras.Model,
    save_dir: pathlib.Path,
    config: Config,
    category_taxonomy: Taxonomy,
    category_names: List[str],
):
    print("Starting training...")
    temporary_log_dir = pathlib.Path(tempfile.mkdtemp())
    print("Temporary log directory: {}".format(temporary_log_dir))

    X_train, y_train = train_data
    X_val, y_val = val_data
    X_test, y_test = test_data

    model.fit(
        X_train,
        y_train,
        batch_size=config.train_config.batch_size,
        epochs=config.train_config.epochs,
        validation_data=(X_val, y_val),
        callbacks=[
            callbacks.TerminateOnNaN(),
            callbacks.ModelCheckpoint(
                filepath=str(save_dir / "weights.{epoch:02d}-{val_loss:.4f}.hdf5"),
                monitor="val_loss",
                save_best_only=True,
            ),
            callbacks.TensorBoard(log_dir=str(temporary_log_dir), histogram_freq=2),
            callbacks.EarlyStopping(monitor="val_loss", patience=4),
            callbacks.CSVLogger(str(save_dir / "training.csv")),
        ],
    )
    print("Training ended")

    log_dir = save_dir / "logs"
    print("Moving log directory from {} to {}".format(temporary_log_dir, log_dir))
    shutil.move(str(temporary_log_dir), str(log_dir))

    model.save(str(save_dir / "last_checkpoint.hdf5"))
    last_checkpoint_path = sorted(save_dir.glob("weights.*.hdf5"))[-1]
    print("Restoring last checkpoint {}".format(last_checkpoint_path))
    model = keras.models.load_model(str(last_checkpoint_path))

    print("Evaluating on validation dataset")
    y_pred_val = model.predict(X_val)
    report, clf_report = evaluation_report(
        y_val, y_pred_val, taxonomy=category_taxonomy, category_names=category_names
    )
    save_json(report, save_dir / "metrics_val.json")
    save_json(clf_report, save_dir / "classification_report_val.json")

    y_pred_test = model.predict(X_test)
    report, clf_report = evaluation_report(
        y_test, y_pred_test, taxonomy=category_taxonomy, category_names=category_names
    )
    save_json(report, save_dir / "metrics_test.json")
    save_json(clf_report, save_dir / "classification_report_test.json")
    args.water_depth = 10  # 10
    args.initial_stimulus = 1  # 1
    args.coriolis_force = 0.0  # 0
    args.water_viscocity = 10e-6  # 0
    args.TIME = 1.0  # 1
    args.dt = 0.01  # 0.01
    args.data_points = 5
    args.image_size_x = args.image_size_y = 184
else:
    plot = False
    args = get_args()

if not os.path.isdir(args.location):
    os.mkdir(args.location)
save_json(vars(args), os.path.join(args.location, 'parameters.json'))


def hillshade(array, azimuth, angle_altitude):
    # Standard hillshading: derive slope and aspect from the height-field gradient,
    # then illuminate from the given azimuth/altitude (both in degrees).
    x, y = np.gradient(array)
    slope = np.pi / 2. - np.arctan(np.sqrt(x * x + y * y))
    aspect = np.arctan2(-x, y)
    azimuthrad = azimuth * np.pi / 180.
    altituderad = angle_altitude * np.pi / 180.
    shaded = np.sin(altituderad) * np.sin(slope) + \
        np.cos(altituderad) * np.cos(slope) * \
        np.cos(azimuthrad - aspect)
    the_range = np.sort(np.reshape(shaded, -1))
    minimum = the_range[int(0.005 * len(the_range))]
def save_to_file(self, file):
    save(self, file)
    # save_json(self, file + '.json')
    save_json(self.state, file + '.state.json')
X, y = generate_data_from_df(
    df,
    ingredient_to_id,
    category_to_id,
    product_name_vocabulary,
    nlp=nlp,
    product_name_max_length=config.model_config.product_name_max_length,
    product_name_preprocessing_config=config.product_name_preprocessing_config,
    nutriments_input=config.model_config.nutriment_input,
)
y_pred = model.predict(X)

category_taxonomy = Taxonomy.from_json(settings.CATEGORY_TAXONOMY_PATH)
report, clf_report = evaluation_report(
    y, y_pred, taxonomy=category_taxonomy, category_names=category_names
)

output_prefix = args.output_prefix
if output_prefix:
    output_prefix += "_"

save_json(report, model_dir / "{}metrics_{}.json".format(output_prefix, eval_type))
save_json(
    clf_report,
    model_dir / "{}classification_report_{}.json".format(output_prefix, eval_type),
)