def __init__(self, path, batch_size):
    super().__init__(path, batch_size)
    self.session_paths = []
    self.stats = None
    self.stat_list = []
    # Split the task directory into session recordings and pickled stats files.
    for p in self.paths:
        if "Session" in p:
            self.session_paths.append(p)
        elif "stats" in p:
            with open(p, 'rb') as handle:
                self.stat_list.append(pickle.load(handle))
    if len(self.stat_list) == 0:
        print("could not find stats!")
    self.set_stats()
    self.set_name(path)
    self.classifier = model.get_classifier(
        int(self.stats['num_classes'])).to(device)
    # Shuffle the sessions, then split them into train/validation partitions.
    self.session_paths = np.random.permutation(self.session_paths).tolist()
    self.train_paths = self.session_paths[:TRAIN_SESSIONS]
    self.val_paths = self.session_paths[TRAIN_SESSIONS:]
    self.train_dataset = dataset_(self.train_paths)
    self.val_dataset = dataset_(self.val_paths)
    self.train_dataloader = DataLoader(self.train_dataset,
                                       batch_size=batch_size, shuffle=True)
    self.val_dataloader = DataLoader(self.val_dataset,
                                     batch_size=batch_size, shuffle=True)
    self.trainloader_iterator = iter(self.train_dataloader)
    self.valloader_iterator = iter(self.val_dataloader)
def __init__(self, hparams):
    super().__init__(hparams)
    from model.utils import model_init
    from model.utils import freeze

    self.num = hparams["model_num"]
    self.models = load_models(hparams)
    assert len(self.models) == self.num
    if not hparams["train_teachers"]:
        freeze(self.models)
    self.models.cuda()

    self.large_gens = nn.ModuleList(
        [self._make_large_generator(size * 4, size)
         for size in [64, 128, 256, 512]])
    # NOTE: the copy.copy below is redundant: _make_small_generator already
    # builds a fresh module on every iteration, and a shallow copy of an
    # nn.Module would share its parameter tensors anyway.
    self.small_gens = nn.ModuleList(
        [nn.ModuleList([copy.copy(self._make_small_generator(size, size * 4))
                        for i in range(self.num)])
         for size in [64, 128, 256, 512]])
    if not hparams["train_gens"]:
        freeze(self.large_gens)
        freeze(self.small_gens)
    else:
        model_init(self.large_gens)
        model_init(self.small_gens)

    self.student = get_classifier(hparams["backbone"], hparams["dataset"])
    if not hparams["train_student"]:
        freeze(self.student)
    else:
        model_init(self.student)

    from model.feature_similarity_measurement import cka_loss
    self.feature_loss = cka_loss()
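# A minimal standalone sketch (not from the source repo) of the copy.copy
# behavior flagged above: a shallow copy of an nn.Module shares parameter
# tensors with the original, while deepcopy creates independent ones.
import copy
import torch.nn as nn

gen = nn.Conv2d(64, 256, kernel_size=1)
shallow = copy.copy(gen)       # shares gen's weight/bias tensors
deep = copy.deepcopy(gen)      # owns independent weight/bias tensors
assert shallow.weight is gen.weight
assert deep.weight is not gen.weight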
def __init__(self, hparams):
    LightningModule.__init__(self, hparams)
    self.scale = hparams['scale']
    self.self_ensemble = hparams['self_ensemble']
    self.model = get_classifier(hparams["backbone"], hparams["dataset"])
    self.teacher = self.load_teacher()
    self.dist_method = self.get_distillation_module()
def predict(date, time):
    classifier = model.get_classifier()
    with open("../server/data/" + date, "r") as file:
        lines = file.readlines()
    line_size = model.FEATURE_SIZE + model.LABEL_PAD
    if time is None:
        # No timestamp given: use the most recent window.
        last_lines = lines[-line_size:]
    else:
        for i in range(len(lines)):
            if lines[i].startswith(time) and i >= line_size:
                last_lines = lines[i - line_size:i]
                break
        else:
            return "Time is not valid"
    q = []
    for line in last_lines:
        origin_item = data_filter.parse_line(line)
        q.append(origin_item)
    # Append a synthetic next step whose timestamp is one tick ahead.
    add_item = origin_item[:]
    add_item[0] = q[-1][0] + 1
    q.append(add_item)
    item = data_filter.cal_que(q)
    if item is None:
        return "Data is bad"
    item.pop()
    last_time = origin_item[0]
    predict_x = {}
    for i in range(len(item)):
        predict_x["C" + str(i)] = []
        predict_x["C" + str(i)].append(item[i])
    predictions = classifier.predict(
        input_fn=lambda: data_loader.eval_input_fn(predict_x, labels=None,
                                                   batch_size=100))
    for pred_dict in predictions:
        template = '"time":{}, "label":{}, "probability":{:.10f}'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        return "{" + template.format(last_time, model.LABELS[class_id],
                                     100 * probability) + "}"
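# Hypothetical usage of predict() above; the date and time values are
# assumptions, and "../server/data/<date>" must exist for this to run.
if __name__ == '__main__':
    print(predict("2019-01-01", None))       # most recent window
    print(predict("2019-01-01", "12:30"))    # window ending at a timestamp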
def __init__(self, hparams):
    super().__init__(hparams)
    self.M_maps = []
    self.teacher_model: ResNet_CIFAR = get_classifier(
        hparams["backbone"], hparams["dataset"])
    freeze(self.teacher_model.eval())

    self.plane_model = nn.ModuleList()
    self.teacher_start_layer = 0
    self.last_channel = self.params['input_channel']
    self.init_student()
def train_classifier():
    train_generator = NoteIsoSequence(train_wav_files,
                                      sample_duration=sample_duration,
                                      sample_rate=sample_rate, n_fft=n_fft,
                                      batch_size=batch_size, epsilon=epsilon,
                                      song_indices=song_indices,
                                      instr_indices=instr_indices,
                                      note_indices=note_indices)
    encoder, classifier = get_classifier()
    classifier.summary()

    test_generator = NoteIsoSequence(test_wav_files,
                                     sample_duration=sample_duration,
                                     sample_rate=sample_rate, n_fft=n_fft,
                                     batch_size=batch_size, epsilon=epsilon,
                                     song_indices=song_indices,
                                     instr_indices=instr_indices,
                                     note_indices=note_indices)

    now = datetime.now()
    # note: the ':' characters from strftime make this log path invalid on
    # Windows; use '-' separators there.
    log_dir = "logs/ae+-" + now.strftime("%Y-%m-%d-%H:%M:%S") + "/"
    callbacks = [
        TensorBoardWrapper(test_generator, log_dir=log_dir, nb_steps=5,
                           histogram_freq=1, batch_size=batch_size,
                           write_graph=False, write_grads=True,
                           write_images=False)
    ]

    classifier.fit_generator(generator=train_generator,
                             validation_data=test_generator,
                             use_multiprocessing=use_multiprocessing,
                             workers=workers, epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             callbacks=callbacks)
    print("saving models...")
    encoder.save("ae+/encoder-5.h")
    classifier.save("ae+/classifier-5.h")
    print("saved ae+.")
def __init__(self, hparams):
    super().__init__(hparams)
    from model.utils import model_init
    from model.utils import freeze

    # self.num is assumed to be set by the superclass __init__.
    self.models = nn.ModuleList(
        [get_classifier(hparams["classifiers"][i], hparams["dataset"])
         for i in range(self.num)])
    for idx, model in enumerate(self.models):
        checkpoint = torch.load(hparams["pretrain_paths"][idx],
                                map_location='cpu')
        # Drop the first six characters of each key (presumably a "model."
        # prefix added by the training wrapper) before loading.
        model.load_state_dict({key[6:]: value for key, value in
                               checkpoint["state_dict"].items()})
        freeze(model)
        model.cuda()

    self.channels = [32, 64, 128, 256, 512]
    # The original assert allowed pos == 5, but channels has only five
    # entries (indices 0-4), so pos must stay below len(self.channels).
    assert 1 <= hparams["pos"] < len(self.channels)
    self.gen0 = self._make_generator(self.channels[hparams["pos"]],
                                     hparams["gen_level"])
    self.gen1 = self._make_generator(self.channels[hparams["pos"]],
                                     hparams["gen_level"])
    model_init(self.gen0)
    model_init(self.gen1)
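# A standalone sketch (not from the source) of the key-stripping dict
# comprehension above: checkpoints saved by a training wrapper often prefix
# every parameter name, e.g. with the 6-character string "model.", and
# key[6:] removes exactly that prefix.
state_dict = {"model.conv1.weight": 0, "model.fc.bias": 1}  # dummy values
stripped = {key[6:]: value for key, value in state_dict.items()}
assert list(stripped) == ["conv1.weight", "fc.bias"]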
def main(argv):
    classifier = model.get_classifier()
    # Each row below is one flattened feature window; the final element is
    # the expected label index.
    items = [
        [6.0,20.0426,5.0,1.6884,3.0,1.0,2.0,1.0999,0.0,1.0,6.0,20.0426,5.0,1.6884,3.0,1.0,2.0,1.0999,0.0,3.94,7.0,52.0,6.0,20.0426,5.0,3.9721,2.0,0.0999,0.0,2.94,7.0,53.0,6.0,20.0426,5.0,3.9721,2.0,0.0999,0.0,2.94,7.0,52.0,6.0,21.0426,5.0,3.9721,2.0,0.0999,1.0,2.94,7.0,52.0,6.0,21.0426,5.0,4.31,3.0,2.94,2.0,0.0999,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,5.88,2.0,0.0999,7.0,51.0,6.0,21.0426,5.0,23.6784,4.0,5.5616,2.0,0.0999,7.0,51.0,6.0,21.0426,5.0,1.6884,4.0,6.5616,2.0,0.0526,8.0,23.9563,7.0,52.0,6.0,20.0426,5.0,29.6884,4.0,6.3796,8.0,23.9563,7.0,51.0,6.0,20.0426,5.0,14.946,4.0,6.3638,8.0,23.9563,7.0,52.0,6.0,20.0426,5.0,14.946,4.0,4.706,8.0,34.9563,7.0,52.0,6.0,20.0426,5.0,2.6884,4.0,6.4943,7.0,52.0,6.0,20.0426,5.0,24.6684,4.0,6.338,3.0,0.8956,8.0,34.9563,7.0,52.0,6.0,20.0426,5.0,13.946,4.0,7.2696,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,7.2543,8.0,34.9563,7.0,53.0,6.0,20.0426,5.0,1.6884,4.0,3.8636,8.0,34.9563,7.0,53.0,6.0,20.0426,5.0,16.5676,4.0,3.8553,8.0,34.9563,7.0,52.0,6.0,20.0426,5.0,4.31,4.0,3.8333,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,3.8333,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,3.9008,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,4.711,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,3.0,3.883,8.0,34.9563,7.0,52.0,6.0,20.0426,5.0,1.6884,3.0,3.8992,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,4.339,3.0,3.8724,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,33.319,3.0,3.8616,10.0,17.6222,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,5.4575,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,5.339,2.0,4.1805,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.8068,2.0,1.1828,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,6.0647,1.0,2.94,5]
        ,[6.0,20.0426,5.0,1.6884,3.0,1.0,2.0,1.0999,0.0,3.94,7.0,52.0,6.0,20.0426,5.0,3.9721,2.0,0.0999,0.0,2.94,7.0,53.0,6.0,20.0426,5.0,3.9721,2.0,0.0999,0.0,2.94,7.0,52.0,6.0,21.0426,5.0,3.9721,2.0,0.0999,1.0,2.94,7.0,52.0,6.0,21.0426,5.0,4.31,3.0,2.94,2.0,0.0999,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,5.88,2.0,0.0999,7.0,51.0,6.0,21.0426,5.0,23.6784,4.0,5.5616,2.0,0.0999,7.0,51.0,6.0,21.0426,5.0,1.6884,4.0,6.5616,2.0,0.0526,8.0,23.9563,7.0,52.0,6.0,20.0426,5.0,29.6884,4.0,6.3796,8.0,23.9563,7.0,51.0,6.0,20.0426,5.0,14.946,4.0,6.3638,8.0,23.9563,7.0,52.0,6.0,20.0426,5.0,14.946,4.0,4.706,8.0,34.9563,7.0,52.0,6.0,20.0426,5.0,2.6884,4.0,6.4943,7.0,52.0,6.0,20.0426,5.0,24.6684,4.0,6.338,3.0,0.8956,8.0,34.9563,7.0,52.0,6.0,20.0426,5.0,13.946,4.0,7.2696,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,7.2543,8.0,34.9563,7.0,53.0,6.0,20.0426,5.0,1.6884,4.0,3.8636,8.0,34.9563,7.0,53.0,6.0,20.0426,5.0,16.5676,4.0,3.8553,8.0,34.9563,7.0,52.0,6.0,20.0426,5.0,4.31,4.0,3.8333,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,3.8333,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,3.9008,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,4.0,4.711,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.6884,3.0,3.883,8.0,34.9563,7.0,52.0,6.0,20.0426,5.0,1.6884,3.0,3.8992,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,4.339,3.0,3.8724,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,33.319,3.0,3.8616,10.0,17.6222,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,5.4575,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,5.339,2.0,4.1805,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,1.8068,2.0,1.1828,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,6.0647,1.0,2.94,8.0,34.9563,7.0,51.0,6.0,20.0426,5.0,26.0447,2.0,1.1828,5]
,[9.0,23.9563,8.0,54.0723,7.0,20.0426,6.0,29.0447,4.0,2.94,9.0,23.9563,8.0,54.0723,7.0,20.0426,6.0,1.9584,4.0,2.94,9.0,23.9563,8.0,53.0723,7.0,21.0426,6.0,18.3223,4.0,2.94,9.0,23.9563,8.0,53.0723,7.0,21.0426,6.0,6.0647,4.0,2.94,8.0,53.0723,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,1.172,8.0,50.615,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,0.671,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,6.0647,4.0,2.94,8.0,51.82,7.0,20.0426,6.0,6.0647,4.0,2.94,2.0,0.6706,9.0,34.9563,8.0,51.82,7.0,20.0426,6.0,6.0647,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,21.0681,6.0,6.0647,2.0,3.6103,11.0,17.0258,9.0,34.9563,8.0,51.0,7.0,43.0226,6.0,6.0647,8.0,51.0,7.0,20.0426,6.0,3.1918,5.0,0.0206,2.0,2.1176,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.94,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.2296,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,0.7599,8.0,51.0,7.0,20.0426,6.0,25.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,31.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,50.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.2073,8.0,50.0,7.0,27.1503,6.0,24.6684,5.0,1.4599,4.0,1.2073,8.0,51.0,7.0,20.587,6.0,21.6684,5.0,2.1599,3.0,2.863,8.0,50.0,7.0,20.0426,6.0,22.2128,5.0,2.1599,3.0,2.814,8.0,50.0,7.0,21.0426,6.0,25.2128,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,29.2328,5.0,2.1599,3.0,2.1136,5] ,[9.0,23.9563,8.0,54.0723,7.0,20.0426,6.0,1.9584,4.0,2.94,9.0,23.9563,8.0,53.0723,7.0,21.0426,6.0,18.3223,4.0,2.94,9.0,23.9563,8.0,53.0723,7.0,21.0426,6.0,6.0647,4.0,2.94,8.0,53.0723,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,1.172,8.0,50.615,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,0.671,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,6.0647,4.0,2.94,8.0,51.82,7.0,20.0426,6.0,6.0647,4.0,2.94,2.0,0.6706,9.0,34.9563,8.0,51.82,7.0,20.0426,6.0,6.0647,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,21.0681,6.0,6.0647,2.0,3.6103,11.0,17.0258,9.0,34.9563,8.0,51.0,7.0,43.0226,6.0,6.0647,8.0,51.0,7.0,20.0426,6.0,3.1918,5.0,0.0206,2.0,2.1176,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.94,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.2296,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,0.7599,8.0,51.0,7.0,20.0426,6.0,25.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,31.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,50.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.2073,8.0,50.0,7.0,27.1503,6.0,24.6684,5.0,1.4599,4.0,1.2073,8.0,51.0,7.0,20.587,6.0,21.6684,5.0,2.1599,3.0,2.863,8.0,50.0,7.0,20.0426,6.0,22.2128,5.0,2.1599,3.0,2.814,8.0,50.0,7.0,21.0426,6.0,25.2128,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,29.2328,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,2.1599,3.0,1.4132,5] 
,[9.0,23.9563,8.0,53.0723,7.0,21.0426,6.0,18.3223,4.0,2.94,9.0,23.9563,8.0,53.0723,7.0,21.0426,6.0,6.0647,4.0,2.94,8.0,53.0723,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,1.172,8.0,50.615,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,0.671,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,6.0647,4.0,2.94,8.0,51.82,7.0,20.0426,6.0,6.0647,4.0,2.94,2.0,0.6706,9.0,34.9563,8.0,51.82,7.0,20.0426,6.0,6.0647,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,21.0681,6.0,6.0647,2.0,3.6103,11.0,17.0258,9.0,34.9563,8.0,51.0,7.0,43.0226,6.0,6.0647,8.0,51.0,7.0,20.0426,6.0,3.1918,5.0,0.0206,2.0,2.1176,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.94,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.2296,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,0.7599,8.0,51.0,7.0,20.0426,6.0,25.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,31.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,50.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.2073,8.0,50.0,7.0,27.1503,6.0,24.6684,5.0,1.4599,4.0,1.2073,8.0,51.0,7.0,20.587,6.0,21.6684,5.0,2.1599,3.0,2.863,8.0,50.0,7.0,20.0426,6.0,22.2128,5.0,2.1599,3.0,2.814,8.0,50.0,7.0,21.0426,6.0,25.2128,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,29.2328,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,2.1599,3.0,1.4132,9.0,23.9563,8.0,50.0,7.0,32.0426,6.0,27.2228,5.0,2.8599,5] ,[9.0,23.9563,8.0,53.0723,7.0,21.0426,6.0,6.0647,4.0,2.94,8.0,53.0723,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,1.172,8.0,50.615,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,0.671,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,6.0647,4.0,2.94,8.0,51.82,7.0,20.0426,6.0,6.0647,4.0,2.94,2.0,0.6706,9.0,34.9563,8.0,51.82,7.0,20.0426,6.0,6.0647,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,21.0681,6.0,6.0647,2.0,3.6103,11.0,17.0258,9.0,34.9563,8.0,51.0,7.0,43.0226,6.0,6.0647,8.0,51.0,7.0,20.0426,6.0,3.1918,5.0,0.0206,2.0,2.1176,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.94,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.2296,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,0.7599,8.0,51.0,7.0,20.0426,6.0,25.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,31.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,50.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.2073,8.0,50.0,7.0,27.1503,6.0,24.6684,5.0,1.4599,4.0,1.2073,8.0,51.0,7.0,20.587,6.0,21.6684,5.0,2.1599,3.0,2.863,8.0,50.0,7.0,20.0426,6.0,22.2128,5.0,2.1599,3.0,2.814,8.0,50.0,7.0,21.0426,6.0,25.2128,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,29.2328,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,2.1599,3.0,1.4132,9.0,23.9563,8.0,50.0,7.0,32.0426,6.0,27.2228,5.0,2.8599,9.0,23.9563,8.0,50.0,7.0,32.0426,6.0,14.4904,5.0,4.376,5] 
        ,[8.0,53.0723,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,1.172,8.0,50.615,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,0.671,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,6.0647,4.0,2.94,8.0,51.82,7.0,20.0426,6.0,6.0647,4.0,2.94,2.0,0.6706,9.0,34.9563,8.0,51.82,7.0,20.0426,6.0,6.0647,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,21.0681,6.0,6.0647,2.0,3.6103,11.0,17.0258,9.0,34.9563,8.0,51.0,7.0,43.0226,6.0,6.0647,8.0,51.0,7.0,20.0426,6.0,3.1918,5.0,0.0206,2.0,2.1176,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.94,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.2296,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,0.7599,8.0,51.0,7.0,20.0426,6.0,25.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,31.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,50.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.2073,8.0,50.0,7.0,27.1503,6.0,24.6684,5.0,1.4599,4.0,1.2073,8.0,51.0,7.0,20.587,6.0,21.6684,5.0,2.1599,3.0,2.863,8.0,50.0,7.0,20.0426,6.0,22.2128,5.0,2.1599,3.0,2.814,8.0,50.0,7.0,21.0426,6.0,25.2128,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,29.2328,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,2.1599,3.0,1.4132,9.0,23.9563,8.0,50.0,7.0,32.0426,6.0,27.2228,5.0,2.8599,9.0,23.9563,8.0,50.0,7.0,32.0426,6.0,14.4904,5.0,4.376,8.0,50.0,7.0,32.0426,6.0,14.4904,5.0,5.9465,4.0,2.94,5]
        ,[8.0,50.615,7.0,21.0426,6.0,6.0647,4.0,2.94,2.0,0.671,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,6.0647,4.0,2.94,8.0,51.82,7.0,20.0426,6.0,6.0647,4.0,2.94,2.0,0.6706,9.0,34.9563,8.0,51.82,7.0,20.0426,6.0,6.0647,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,21.0681,6.0,6.0647,2.0,3.6103,11.0,17.0258,9.0,34.9563,8.0,51.0,7.0,43.0226,6.0,6.0647,8.0,51.0,7.0,20.0426,6.0,3.1918,5.0,0.0206,2.0,2.1176,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.94,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,3.0,2.2296,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.0206,2.0,2.94,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,9.0,34.9563,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,0.7599,8.0,51.0,7.0,20.0426,6.0,25.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,31.2128,5.0,0.7599,4.0,2.2396,8.0,51.0,7.0,20.0426,6.0,2.2328,5.0,0.7599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1623,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,50.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.1612,8.0,51.0,7.0,20.587,6.0,1.6884,5.0,1.4599,4.0,1.2073,8.0,50.0,7.0,27.1503,6.0,24.6684,5.0,1.4599,4.0,1.2073,8.0,51.0,7.0,20.587,6.0,21.6684,5.0,2.1599,3.0,2.863,8.0,50.0,7.0,20.0426,6.0,22.2128,5.0,2.1599,3.0,2.814,8.0,50.0,7.0,21.0426,6.0,25.2128,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,29.2328,5.0,2.1599,3.0,2.1136,8.0,51.0,7.0,20.0426,6.0,14.4904,5.0,2.1599,3.0,1.4132,9.0,23.9563,8.0,50.0,7.0,32.0426,6.0,27.2228,5.0,2.8599,9.0,23.9563,8.0,50.0,7.0,32.0426,6.0,14.4904,5.0,4.376,8.0,50.0,7.0,32.0426,6.0,14.4904,5.0,5.9465,4.0,2.94,8.0,51.0,7.0,31.0426,6.0,25.2128,5.0,4.376,4.0,3.8101,5]
    ]
    predict_x = {}
    expected = []
    column_names = model.get_column_names()
    # Build one column list per feature name, then collect expected labels.
    for i in range(len(column_names)):
        predict_x[column_names[i]] = []
        for item in items:
            predict_x[column_names[i]].append(item[i])
    for item in items:
        expected.append(item[len(item) - 1])
    predictions = classifier.predict(
        input_fn=lambda: data_loader.eval_input_fn(predict_x, labels=None,
                                                   batch_size=100))
    for pred_dict, expec in zip(predictions, expected):
        template = 'predict: {},{:.10f}, expected: {}'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(model.LABELS[class_id], 100 * probability,
                              model.LABELS[expec]))
def __init__(self, path, batch_size):
    super().__init__(path, batch_size)
    self.batch_size = batch_size
    self.train_paths = self.paths[0]
    self.stats = {'num_classes': 4, 'num_channels': 22, 'length': 250}
    self.val_paths = self.paths[1]
    self.name = path[path.rfind('\\') + 4] + '\'' + 'Comp'
    self.classifier = model.get_classifier(4).to(device)
    self.classifier_id = 'Comp'
    self.filter_id = 'Comp'
    self.train_dataset = dataset_(self.train_paths)
    self.val_dataset = dataset_(self.val_paths)
    self.train_dataloader = DataLoader(self.train_dataset,
                                       batch_size=batch_size, shuffle=True)
    self.val_dataloader = DataLoader(self.val_dataset,
                                     batch_size=batch_size, shuffle=True)
    self.trainloader_iterator = iter(self.train_dataloader)
    self.valloader_iterator = iter(self.val_dataloader)
def main(do_test_run=False,
         resume_checkpoint=None,
         run_method='alternate_train_and_validate',
         **kwargs):
    """This is the main entry point of the script.

    Its main responsibility is to create an Engine object using the default
    config, modified by the kwargs given to this function.

    By default it creates an Engine with no preloaded weights for the model
    and optimizer. When `resume_checkpoint` is set to the path of a
    checkpoint file, that checkpoint will be loaded instead.

    By default, the `Engine#alternate_train_and_validate` method will be
    called. This can be changed via the `run_method` parameter.
    """
    config.update(kwargs)
    if os.path.exists(config['pwd']):
        if input(f"{config['pwd']} already exists, are you sure? [y/N]: ") != 'y':
            return
    print(f"config =\n{config}")

    cnn_model = get_cnn(config)
    classifier_model = get_classifier(config)
    detective_model = get_detective(config)
    if resume_checkpoint is not None:
        info(f"loading state_dict for the engine from {resume_checkpoint} ...")
        the_state_dict = torch.load(resume_checkpoint)
        cnn_model.load_state_dict(the_state_dict['cnn_model'])
        classifier_model.load_state_dict(the_state_dict['classifier_model'])
        detective_model.load_state_dict(the_state_dict['detective_model'])
        info(f"loaded state_dict for the engine from {resume_checkpoint}.")

    engine = Engine(
        cnn_model=cnn_model,
        cnn_optimizer=(torch.optim.Adam, {'lr': 3e-4}),
        classifier_model=classifier_model,
        classifier_optimizer=(torch.optim.Adam, {'lr': 3e-4}),
        classifier_loss_fn=nn.BCEWithLogitsLoss(),
        detective_model=detective_model,
        detective_optimizer=(torch.optim.Adam, {'lr': 3e-4}),
        detective_loss_fn=nn.SmoothL1Loss(),
        max_epochs=config['max_epochs'],
        loader_pair=get_loader_pair(config),
        plugins=[
            Timestamp(),
            TrainingMetrics(
                classifier_metrics={
                    # 'loss': loss,
                    # 'acc': acc,
                },
                detective_metrics={
                    'loss': loss,
                },
                residual_factor=max(1 - config['train_batch_size'] / 10000, 0),
            ),
            ValidationMetrics(
                classifier_metrics={
                    # 'loss': loss,
                    # 'acc': acc,
                    # 'std': std,
                    # 'auc': auc,
                },
                detective_metrics={
                    'loss': loss,
                    'std': std,
                },
            ),
            ReduceLROnPlateau(),
            Checkpoint(),
            Messages(),
        ],
        device=config['device'],
        pwd=config['pwd'],
    )

    if do_test_run:
        engine.__getattribute__(run_method)(test_run=True)
        engine.reset()
    engine.__getattribute__(run_method)()
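# Hypothetical entry-point usage of main() above (the checkpoint path is an
# assumption): do a quick test run, reset, then train and validate fully.
if __name__ == '__main__':
    main(do_test_run=True,
         resume_checkpoint='checkpoints/engine.pth',
         run_method='alternate_train_and_validate')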
def __init__(self, hparams):
    # The argument must be named `hparams`, or there will be plenty of bugs.
    super().__init__(hparams)
    self.model = get_classifier(hparams["backbone"], hparams["dataset"])
from model import get_classifier
from torch import nn
import torch

if __name__ == '__main__':
    params = {
        'arch': 'resnet20_layerwise',
    }
    model = get_classifier(params, "cifar10")

    from model.basic_cifar_models.resnet_layerwise_cifar import (
        BasicBlock_1, BasicBlock_2, ConvBNReLULayer)

    x_test = torch.randn((16, 3, 32, 32))
    for layer in model.sequential_models:
        if isinstance(layer, (BasicBlock_1, BasicBlock_2, ConvBNReLULayer)):
            conv = layer.simplify_layer()[0]
            assert isinstance(conv, nn.Conv2d)
            with torch.no_grad():
                ans = layer(x_test)
                out = nn.ReLU()(conv(x_test))
                diff_max = (ans - out).abs().max()
                print("diff_max = ", diff_max)  # this should be smaller than 1e-5
        x_test = layer(x_test)

    # model.cuda().eval()
    # x_test = torch.randn((128, 3, 32, 32)).float().cuda()
    # with torch.no_grad():
    #     feats, outs = model(x_test, with_feature=True)
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)
    cfg.freeze()

    start_epoch = 0
    checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # display number of workers
    print(f"number of train workers {cfg.TRAIN.WORKERS}")

    # fetch precomputed features
    with open('train_features.pkl', 'rb') as f:
        train_features = pickle.load(f)
    with open('train_labels.pkl', 'rb') as f:
        train_labels = pickle.load(f)
    with open('valid_features.pkl', 'rb') as f:
        valid_features = pickle.load(f)
    with open('valid_labels.pkl', 'rb') as f:
        valid_labels = pickle.load(f)

    n_features = (train_features.shape[1] * train_features.shape[2] *
                  train_features.shape[3])
    n_classes = 101

    # create model
    if cfg.MODEL.ARCH_STYLE == 'classifier':
        print("=> creating classifier")
        model = get_classifier(n_features=n_features, n_classes=n_classes)
    else:
        print("=> creating regressor")
        model = get_regressor(n_features=n_features, n_classes=n_classes)

    # create optimizer
    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # GPU config
    if args.multi_gpu:
        model = nn.DataParallel(model)
    if device == "cuda":
        cudnn.benchmark = True

    # criterion
    if cfg.MODEL.ARCH_STYLE == 'classifier':
        if cfg.MODEL.SMOOTHING:
            print("=> using label smoothing")
            criterion = LabelSmoothingLoss(
                std_smoothing=cfg.MODEL.STD_SMOOTHING,
                n_classes=n_classes).to(device)
        else:
            criterion = nn.CrossEntropyLoss().to(device)
    else:
        if cfg.MODEL.ALEATORIC:
            criterion = HeteroscedasticGaussianLoss().to(device)
        else:
            criterion = nn.L1Loss(reduction="sum").to(device)

    # loaders (note: both use cfg.TEST.BATCH_SIZE, as in the original)
    train_loader = get_feature_loader(train_features, train_labels,
                                      batch_size=cfg.TEST.BATCH_SIZE,
                                      shuffle=True,
                                      num_workers=cfg.TRAIN.WORKERS,
                                      drop_last=True)
    val_loader = get_feature_loader(valid_features, valid_labels,
                                    batch_size=cfg.TEST.BATCH_SIZE,
                                    shuffle=False,
                                    num_workers=cfg.TRAIN.WORKERS,
                                    drop_last=False)

    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.LR_DECAY_STEP,
                       gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)
    best_val_mae = 10000.0
    train_writer = None

    if args.tensorboard is not None:
        opts_prefix = "_".join(args.opts)
        train_writer = SummaryWriter(
            log_dir=args.tensorboard + "/" + opts_prefix + "_train")
        val_writer = SummaryWriter(
            log_dir=args.tensorboard + "/" + opts_prefix + "_val")

    print('=> Start training')
    for epoch in range(start_epoch, cfg.TRAIN.EPOCHS):
        # train
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, device)

        # validate
        val_loss, val_acc, val_mae = validate(val_loader, model, criterion,
                                              epoch, device)

        if args.tensorboard is not None:
            train_writer.add_scalar("loss", train_loss, epoch)
            train_writer.add_scalar("acc", train_acc, epoch)
            val_writer.add_scalar("loss", val_loss, epoch)
            val_writer.add_scalar("acc", val_acc, epoch)
            val_writer.add_scalar("mae", val_mae, epoch)

        # checkpoint on improvement
        if val_mae < best_val_mae:
            print(f"=> [epoch {epoch:03d}] best val mae was improved "
                  f"from {best_val_mae:.3f} to {val_mae:.3f}")
            model_state_dict = (model.module.state_dict()
                                if args.multi_gpu else model.state_dict())
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': cfg.MODEL.ARCH,
                    'state_dict': model_state_dict,
                    'optimizer_state_dict': optimizer.state_dict()
                },
                str(checkpoint_dir.joinpath(
                    "epoch{:03d}_{:.5f}_{:.4f}.pth".format(
                        epoch, val_loss, val_mae))))
            best_val_mae = val_mae
        else:
            print(f"=> [epoch {epoch:03d}] best val mae was not improved "
                  f"from {best_val_mae:.3f} ({val_mae:.3f})")

        # adjust learning rate
        scheduler.step()

    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best val mae: {best_val_mae:.3f}")
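# A minimal sketch (hypothetical shapes, not the real .pkl contents) of how
# n_features above is derived: the (N, C, H, W) feature tensor is flattened
# per sample into C * H * W inputs for the classifier head.
import numpy as np

train_features = np.zeros((8, 512, 7, 7))   # dummy stand-in
n_features = (train_features.shape[1] * train_features.shape[2] *
              train_features.shape[3])
assert n_features == 512 * 7 * 7  # 25088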
def __init__(self, hparams):
    super().__init__(hparams)
    self.scale = hparams['scale']
    self.self_ensemble = hparams['self_ensemble']
    self.model = get_classifier(hparams["backbone"], hparams["dataset"])
def __init__(self, task_batch_size, data_batch_size, shuffle=True,
             train_size=0.80, share=False, train_task_types=None,
             val_task_ids=None, use_comp=False, use_filters=False):
    all_paths = glob.glob("data/*")
    self.shuffle = shuffle
    self.task_paths = []
    for p in all_paths:
        self.task_paths.append(p)
    self.task_batch_size = task_batch_size
    self.data_batch_size = data_batch_size
    self.train_tasks = []
    self.val_tasks = []
    num_train_tasks = int(len(self.task_paths) * train_size)

    if train_task_types is None and val_task_ids is None:
        # No explicit split requested: shuffle and split by ratio.
        tasks = []
        if shuffle:
            self.task_paths = np.random.permutation(self.task_paths)
        for p in self.task_paths:
            if 'Comp' in p:
                tasks.append(competetask(p, self.data_batch_size))
            elif 'Large' in p:
                tasks.append(largetask(p, self.data_batch_size))
        self.train_tasks = tasks[:num_train_tasks]
        self.val_tasks = tasks[num_train_tasks:]
    else:
        # Explicit split: route tasks by id and type.
        for p in self.task_paths:
            cut_path = p[p.rfind('\\') + 1:]
            task_id = cut_path[:2]
            task_type = p[p.rfind('-') + 1:]
            if int(task_id) in val_task_ids:
                print("Val task ", task_id)
                if 'Comp' in p:
                    if use_comp:
                        self.val_tasks.append(
                            competetask(p, self.data_batch_size))
                else:
                    self.val_tasks.append(
                        largetask(p, self.data_batch_size))
            elif task_type in train_task_types or train_task_types == "All":
                print("Train task ", task_id)
                if 'Comp' in p:
                    if use_comp:
                        self.train_tasks.append(
                            competetask(p, self.data_batch_size))
                else:
                    self.train_tasks.append(
                        largetask(p, self.data_batch_size))
        if shuffle:
            self.train_tasks = np.random.permutation(self.train_tasks).tolist()
            self.val_tasks = np.random.permutation(self.val_tasks).tolist()

    if share:
        # Share one classifier (and optionally one filter) per id across tasks.
        self.classifiers = {}
        if use_filters:
            self.filters = {}
        for ta in self.train_tasks + self.val_tasks:
            self.classifiers[ta.classifier_id] = model.get_classifier(
                int(ta.stats['num_classes'])).to(device)
            if use_filters:
                self.filters[ta.filter_id] = model.Filter(
                    ta.stats['num_channels'], ta.stats['length'],
                    128).to(device)
        for ta in self.train_tasks + self.val_tasks:
            ta.classifier = self.classifiers[ta.classifier_id]
            if use_filters:
                ta.filter = self.filters[ta.filter_id]
        print(self.classifiers)
        if use_filters:
            print(self.filters)

    self.num_train_iters = int(len(self.train_tasks) / self.task_batch_size)
    self.num_val_iters = int(len(self.val_tasks) / self.task_batch_size)