def train(self):
    # train conv stack layer by layer
    for i, stack in enumerate(self.conv_stack):
        if self.checkpoint_file != '':
            model = load(self.checkpoint_file)
            self.net = FastNet(self.learning_rate, self.image_shape, self.n_out, initModel=model)
            # drop the softmax layer (and the fake fc layer under it)
            self.net.del_layer()
            self.net.del_layer()
            # for i in range(len(self.fc_params)):
            #     self.net.del_layer()
            self.net.disable_bprop()

        layerParam = stack + [self.fakefc_param, self.softmax_param]
        self.net.append_layers_from_dict(layerParam)

        self.init_data_provider()
        self.scheduler.reset()
        self.scheduler.set_level(i)
        self.test_outputs = []
        self.train_outputs = []
        AutoStopTrainer.train(self)

    # train fc stack layer by layer
    for i, stack in enumerate(self.fc_stack):
        model = load(self.checkpoint_file)
        self.net = FastNet(self.learning_rate, self.image_shape, self.n_out, initModel=model)
        self.net.del_layer()
        self.net.del_layer()
        self.net.disable_bprop()

        # the last fc stack connects straight to softmax; earlier ones
        # still need the temporary fake fc layer in between
        if i == len(self.fc_stack) - 1:
            layerParam = stack + [self.softmax_param]
        else:
            layerParam = stack + [self.fakefc_param, self.softmax_param]
        self.net.append_layers_from_dict(layerParam)

        self.init_data_provider()
        self.scheduler.reset()
        self.scheduler.set_level(i)
        self.test_outputs = []
        self.train_outputs = []
        AutoStopTrainer.train(self)

    # finally, fine-tune the full network on the original test range
    model = load(self.checkpoint_file)
    self.test_id += 1
    self.net = FastNet(self.learning_rate, self.image_shape, self.n_out, initModel=model)
    self.test_range = self.origin_test_range
    self.init_data_provider()
    self.scheduler = Scheduler(self)
    self.num_epoch /= 2
    AutoStopTrainer.train(self)

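# The trainer above repeatedly reloads the checkpoint, freezes what is already
# trained, and appends the next stack before retraining. A minimal,
# self-contained sketch of that greedy layer-wise control flow; the
# `build_net` / `train_stage` callables here are hypothetical stand-ins, not
# part of the FastNet API:

def greedy_layerwise(stacks, build_net, train_stage):
    """Train `stacks` one at a time, freezing everything trained so far.

    build_net(frozen, trainable) -> net; train_stage(net) trains in place.
    """
    frozen = []
    for stack in stacks:
        net = build_net(frozen, stack)   # frozen layers receive no backprop
        train_stage(net)
        frozen = frozen + stack          # current stack is frozen next round
    return frozen
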
def train(self):
    MiniBatchTrainer.train(self)

    for i, cate in enumerate(self.range_list):
        self.set_category_range(cate)
        self.num_batch = self.curr_epoch = self.curr_batch = 0
        self.curr_minibatch = 0
        self.num_minibatch = self.train_minibatch_list[i]

        model = load(self.checkpoint_file)
        layers = model['model_state']['layers']
        # drop the learned fc weights so they are re-initialized
        # for the new category range
        for l in layers:
            if l['type'] == 'fc':
                l['weight'] = None
                l['bias'] = None
                l['weightIncr'] = None
                l['biasIncr'] = None

        # resize the last fc layer (just before softmax) to the new range
        fc = layers[-2]
        fc['outputSize'] = cate

        self.learning_rate = self.learning_rate_list[i]
        self.net = FastNet(self.learning_rate, self.image_shape, self.n_out, init_model=model)
        self.net.clear_weight_incr()
        MiniBatchTrainer.train(self)

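# Clearing stale fc weights, as above, matters because a weight matrix saved
# for the old outputSize cannot be reused once the category count changes.
# The same reset in isolation, operating on a checkpoint layer list (the dict
# keys mirror the ones used above; the function name and `new_size` are
# hypothetical):

def reset_fc_for_new_output(layers, new_size):
    fc = layers[-2]                      # last fc sits just before softmax
    fc['outputSize'] = new_size
    for key in ('weight', 'bias', 'weightIncr', 'biasIncr'):
        fc[key] = None                   # None => re-initialized on load
    return layers
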
def _get_next_batch(self):
    self.get_next_index()
    if self.curr_batch_index == 0:
        random.shuffle(self.batch_range)
        self.curr_epoch += 1
    self.curr_batch = self.batch_range[self.curr_batch_index]

    filename = os.path.join(self.data_dir, 'data_batch_%d' % self.curr_batch)
    data = util.load(filename)
    img = data['data'] - self.batch_meta['data_mean']
    self.labels = np.array(data['labels'])
    self.data = np.require(img, requirements='C', dtype=np.float32)

def _get_next_batch(self):
    self.get_next_index()
    if self.curr_batch_index == 0:
        random.shuffle(self.batch_range)
        self.curr_epoch += 1
    self.curr_batch = self.batch_range[self.curr_batch_index]

    filename = os.path.join(self.data_dir, 'data_batch_%d' % self.curr_batch)
    self.data = util.load(filename)
    return BatchData(self.data['data'] - self.batch_meta['data_mean'],
                     np.array(self.data['labels']),
                     self.curr_epoch,
                     self.curr_batch)

def _get_next_batch(self):
    self.get_next_index()
    if self.curr_batch_index == 0:
        random.shuffle(self.batch_range)
        self.curr_epoch += 1
    self.curr_batch = self.batch_range[self.curr_batch_index]

    filename = os.path.join(self.data_dir + '.%s' % self.curr_batch)
    data_dic = util.load(filename)
    data = data_dic[self.data_name].transpose()
    labels = data_dic['labels']

    self.labels = labels
    self.data = np.require(data, requirements='C', dtype=np.float32)

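# All three _get_next_batch variants share one pattern: walk a shuffled list
# of batch ids and reshuffle whenever the index wraps to 0, so every epoch
# sees a fresh order. A self-contained sketch of just that iteration logic
# (the class and method names are illustrative, not the providers' real API):

import random

class ShuffledBatchCursor(object):
    def __init__(self, batch_range):
        self.batch_range = list(batch_range)
        self.curr_batch_index = -1
        self.curr_epoch = 0

    def next_batch_id(self):
        self.curr_batch_index = (self.curr_batch_index + 1) % len(self.batch_range)
        if self.curr_batch_index == 0:
            random.shuffle(self.batch_range)  # new order each epoch
            self.curr_epoch += 1
        return self.batch_range[self.curr_batch_index]
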
def train(self):
    AutoStopTrainer.train(self)

    if self.layerwised:
        for i in range(len(self.n_filters) - 1):
            next_n_filter = [self.n_filters[i + 1]]
            next_size_filter = [self.size_filters[i + 1]]

            model = load(self.checkpoint_file)
            self.net = FastNet(self.learning_rate, self.image_shape, 0, initModel=model)
            self.net.del_layer()
            self.net.del_layer()
            self.net.disable_bprop()
            self.net.add_parameterized_layers(next_n_filter, next_size_filter, self.fc_nouts)

            self.init_data_provider()
            self.scheduler = Scheduler(self)
            self.test_outputs = []
            self.train_outputs = []
            AutoStopTrainer.train(self)

def __init__(self, data_dir='.', batch_range=None):
    self.data_dir = data_dir
    self.meta_file = os.path.join(data_dir, 'batches.meta')

    self.curr_batch_index = 0
    self.curr_batch = None
    self.curr_epoch = 1
    self.data = None

    if os.path.exists(self.meta_file):
        self.batch_meta = util.load(self.meta_file)
    else:
        print "No default meta file 'batches.meta', using another meta file"

    if batch_range is None:
        self.batch_range = self.get_batch_indexes()
    else:
        self.batch_range = batch_range
    random.shuffle(self.batch_range)

def __init__(self, checkpoint_dir, test_id):
    self.checkpoint_dir = checkpoint_dir
    if not os.path.exists(self.checkpoint_dir):
        os.system('mkdir -p \'%s\'' % self.checkpoint_dir)

    self.test_id = test_id
    self.regex = re.compile(r'^test%d-(\d+)$' % self.test_id)

    cp_pattern = self.checkpoint_dir + '/test%d-*' % self.test_id
    cp_files = glob.glob(cp_pattern)
    if not cp_files:
        self.checkpoint = None
        self.checkpoint_file = None
    else:
        self.checkpoint_file = sorted(cp_files, key=os.path.getmtime)[-1]
        util.log('Loading from checkpoint file: %s', self.checkpoint_file)
        self.checkpoint = util.load(self.checkpoint_file)

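# The resume logic above picks the newest matching checkpoint by modification
# time. The same lookup in isolation (the function name and the 'test<id>-*'
# naming convention follow the snippet above; treat the helper itself as a
# sketch, not part of the checkpoint class):

import glob
import os

def latest_checkpoint(checkpoint_dir, test_id):
    """Return the most recently written 'test<id>-*' file, or None."""
    cp_files = glob.glob(os.path.join(checkpoint_dir, 'test%d-*' % test_id))
    if not cp_files:
        return None
    return max(cp_files, key=os.path.getmtime)
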
def train(self):
    MiniBatchTrainer.train(self)

    for i, group in enumerate(self.num_group_list):
        self.set_num_group(group)
        self.num_batch = self.curr_epoch = self.curr_batch = 0
        self.curr_minibatch = 0
        self.num_minibatch = self.train_minibatch_list[i]

        model = load(self.checkpoint_file)
        layers = model['model_state']['layers']
        # resize the last fc layer and drop its weights so it is
        # re-initialized for the new group count
        fc = layers[-2]
        fc['outputSize'] = group
        fc['weight'] = None
        fc['bias'] = None
        fc['weightIncr'] = None
        fc['biasIncr'] = None

        self.learning_rate = self.learning_rate_list[i]
        self.net = FastNet(self.learning_rate, self.image_shape, self.n_out, init_model=model)
        self.net.clear_weight_incr()
        MiniBatchTrainer.train(self)

def __init__(self, checkpoint, show_filters, channels=3):
    self.checkpoint = checkpoint
    self.model = util.load(self.checkpoint)
    self.layers = self.model['layers']
    self.show_filters = show_filters
    self.channels = channels

if len(factor) == 1:
    param_dict['factor'] = factor[0]
else:
    param_dict['factor'] = factor

learning_rate = util.string_to_float_list(args.learning_rate)
if len(learning_rate) == 1:
    param_dict['learning_rate'] = learning_rate[0]
else:
    param_dict['learning_rate'] = learning_rate

param_dict['batch_size'] = args.batch_size
param_dict['checkpoint_dir'] = args.checkpoint_dir
trainer = args.trainer

cp_pattern = param_dict['checkpoint_dir'] + '/test%d' % param_dict['test_id']
cp_files = glob.glob('%s*' % cp_pattern)
if not cp_files:
    util.log('No checkpoint, starting from scratch.')
    param_dict['init_model'] = Parser(args.param_file).get_result()
else:
    cp_file = sorted(cp_files, key=os.path.getmtime)[-1]
    util.log('Loading from checkpoint file: %s', cp_file)
    param_dict['init_model'] = util.load(cp_file)

trainer = get_trainer_by_name(trainer, param_dict, args)
util.log('start to train...')
trainer.train()
#trainer.predict(['pool5'], 'image.opt')

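# The driver above repeats one normalization for both factor and
# learning_rate: a single-element list collapses to a scalar, while a longer
# list is kept as-is (per-stage values). Factored out as a sketch (the helper
# name is hypothetical, not part of util):

def scalar_or_list(values):
    return values[0] if len(values) == 1 else values

# e.g. param_dict['learning_rate'] = scalar_or_list(learning_rate)
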
def __init__(self, checkpoint, show_filters, channels=3):
    self.checkpoint = checkpoint
    self.model = util.load(self.checkpoint)
    self.layers = self.model['model_state']['layers']
    self.show_filters = show_filters
    self.channels = channels

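# The two visualizer constructors differ only in where the checkpoint stores
# its layer list: at the top level ('layers') or nested under 'model_state'.
# A tolerant accessor covering both layouts seen above (the fallback order is
# an assumption):

def get_layers(model):
    if 'layers' in model:
        return model['layers']
    return model['model_state']['layers']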