def load_model(input_dir):
    """
    This hook can be implemented to adjust logic in the scoring mode.
    The load_model hook provides a way to implement model loading yourself.
    This function should return an object that represents your model. This object
    will be passed to the predict hook for performing predictions.
    This hook can be used to load supported models if your model has multiple
    artifacts, or for loading models that drum does not natively support.

    :param input_dir: the directory to load serialized models from
    :returns: Object containing the model - the predict hook will get this object as a parameter
    """
    # Build and return a MyModel instance from the serialized artifacts in input_dir.
    return MyModel(input_dir)
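# Companion sketch (assumption, not part of the original snippet): the docstring above
# says the returned object is handed to the predict hook. In DRUM that hook is commonly
# written as `score(data, model, **kwargs)`; `MyModel.predict` below is hypothetical and
# only illustrates how the loaded object would be consumed.
import pandas as pd

def score(data, model, **kwargs):
    # `model` is the object returned by load_model; `data` arrives as a pandas DataFrame.
    predictions = model.predict(data)  # hypothetical MyModel API
    return pd.DataFrame({"Predictions": predictions})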
def __init__(self, camera):
    self.mainwin = QMainWindow()
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self.mainwin)
    self.camera_scene = QGraphicsScene()
    self.ui.cameraView.setScene(self.camera_scene)
    self.camera = camera
    self.liveinput_timer = None
    self.counter = 0
    self.words = []
    self.lock = RLock()
    self.cam_image_fit_needed = True
    self.datamodel = MyModel()
    self.controller = MyController(self.datamodel)
    self.bindir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "EMNIST", "bin")
    self.model = load_model(self.bindir)
    self.mapping = pickle.load(open('%s/mapping.p' % self.bindir, 'rb'))
def run_train(self):
    print "[Trainer-Train] feedback file: \"{}\"".format(self.listfile_)
    print "[Trainer-Train] input: \"{}\"".format(self.inputfile_)
    print "[Trainer-Train] setting: \"{}\"".format(self.py_settingfile_)
    print "[Trainer-Train] dim: {}".format(self.unit_)
    print "[Trainer-Train] epoch: {}".format(self.epoch_)
    print "[Trainer-Train] mini-batch size: {}".format(self.batch_size_)
    print "[Trainer-Train] GPU id: {}".format(self.gpu_id_)

    # Remove old files
    self.remove(os.path.join(home_dir, "result"))

    # Initialize model to train
    model = MyModel(self.unit_)
    if self.gpu_id_ >= 0:
        cuda.get_device_from_id(self.gpu_id_).use()
        model.to_gpu(self.gpu_id_)

    # Load datasets and set up iterator
    self.train_ = ImportDataset(self.listfile_, self.inputfile_)
    train_iter = chainer.iterators.SerialIterator(
        self.train_, batch_size=self.batch_size_, shuffle=False)

    # Set optimizer
    optimizer = chainer.optimizers.AdaDelta()
    optimizer.setup(model)

    # Set up updater and trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=self.gpu_id_)
    trainer = training.Trainer(updater, (self.epoch_, "epoch"),
                               os.path.join(home_dir, "result"))

    # Run trainer
    print "[Trainer-Train] Start main training."
    trainer.run()
    self.model_ = model.copy()
    print "[Trainer-Train] --> Finished."
def do_train():
    # Read the label map.
    label_map_path = os.path.join(args.data_path, "predicate2id.json")
    if not (os.path.exists(label_map_path) and os.path.isfile(label_map_path)):
        sys.exit("{} does not exist or is not a file.".format(label_map_path))
    with open(label_map_path, 'r', encoding='utf8') as fp:
        label_map = json.load(fp)
    num_classes = (len(label_map.keys()) - 2) * 2 + 2
    train_num = len(open(os.path.join(args.data_path, 'train_data.json')).readlines())

    # Load the pretrained ERNIE model.
    model = MyModel(num_classes)
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    criterion = BCELossForDuIE()

    # Load the datasets.
    train_generator = DuIEDataset.from_file(
        os.path.join(args.data_path, 'train_data.json'),
        tokenizer, args.max_seq_length, True)
    train_dataset = tf.data.Dataset.from_generator(
        train_generator,
        (tf.int32, tf.int32, tf.int32, tf.int32, tf.int32),
        (tf.TensorShape([None]), tf.TensorShape([]), tf.TensorShape([None]),
         tf.TensorShape([None]), tf.TensorShape([None, 1]))).shuffle(10000).batch(args.batch_size)

    eval_file_path = os.path.join(args.data_path, 'dev_data.json')
    test_generator = DuIEDataset.from_file(eval_file_path, tokenizer,
                                           args.max_seq_length, True)
    test_dataset = tf.data.Dataset.from_generator(
        test_generator,
        (tf.int32, tf.int32, tf.int32, tf.int32, tf.int32),
        (tf.TensorShape([None]), tf.TensorShape([]), tf.TensorShape([None]),
         tf.TensorShape([None]), tf.TensorShape([None, 1]))).batch(args.batch_size)

    # Define the learning rate strategy.
    steps_by_epoch = math.ceil(train_num / args.batch_size)
    num_training_steps = steps_by_epoch * args.num_train_epochs
    lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
                                         args.warmup_ratio)

    # Generate parameter names needed to perform weight decay.
    # All bias and LayerNorm parameters are excluded.
    decay_params = [
        tvar for tvar in model.trainable_variables
        if not any(nd in tvar.name for nd in ["bias", "Norm"])
    ]
    optimizer = tfa.optimizers.AdamW(weight_decay=args.weight_decay,
                                     learning_rate=lr_scheduler)

    train_loss = tf.keras.metrics.Mean(name='train_loss')
    test_loss = tf.keras.metrics.Mean(name='test_loss')

    @tf.function
    def train_step(batch):
        input_ids, seq_lens, tok_to_orig_start_index, tok_to_orig_end_index, labels = batch
        with tf.GradientTape() as tape:
            logits = model(input_ids=input_ids)
            # Mask out padding and special tokens (ids 0, 1 and 2).
            mask = tf.logical_and(tf.logical_and(input_ids != 0, input_ids != 1),
                                  input_ids != 2)
            loss = criterion((logits, labels, mask))
        # Update all trainable weights of the model.
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        train_loss(loss)

    # Start training.
    global_step = 0
    logging_steps = 50
    save_steps = 10000
    tic_train = time.time()
    for epoch in range(args.num_train_epochs):
        print("\n=====start training of %d epochs=====" % epoch)
        tic_epoch = time.time()
        train_loss.reset_states()
        for step, batch in enumerate(train_dataset):
            train_step(batch)
            loss_item = train_loss.result()
            global_step += 1
            if global_step % logging_steps == 0:
                print("epoch: %d / %d, steps: %d / %d, loss: %f, speed: %.2f step/s"
                      % (epoch, args.num_train_epochs, step, steps_by_epoch, loss_item,
                         logging_steps / (time.time() - tic_train)))
                tic_train = time.time()
            if global_step % save_steps == 0:
                print("\n=====start evaluating ckpt of %d steps=====" % global_step)
                test_loss.reset_states()
                precision, recall, f1 = evaluate(model, criterion, test_dataset,
                                                 test_loss, eval_file_path, "eval")
                print("precision: %.2f\t recall: %.2f\t f1: %.2f\t"
                      % (100 * precision, 100 * recall, 100 * f1))
                print("saving checkpoint model_%d.pdparams to %s "
                      % (global_step, args.output_dir))
                model.save(os.path.join(args.output_dir, "model_%d.pdparams.h5" % global_step))
        tic_epoch = time.time() - tic_epoch
        print("epoch time footprint: %d hour %d min %d sec"
              % (tic_epoch // 3600, (tic_epoch % 3600) // 60, tic_epoch % 60))

    # Do final evaluation.
    print("\n=====start evaluating last ckpt of %d steps=====" % global_step)
    test_loss.reset_states()
    precision, recall, f1 = evaluate(model, criterion, test_dataset, test_loss,
                                     eval_file_path, "eval")
    print("precision: %.2f\t recall: %.2f\t f1: %.2f\t"
          % (100 * precision, 100 * recall, 100 * f1))
    model.save(os.path.join(args.output_dir, "model_%d.pdparams.h5" % global_step))
    print("\n=====training complete=====")
labels = data_array[:, 0]
features_tensor = Variable(torch.Tensor(features).float())
labels_tensor = Variable(torch.Tensor(labels).long())

# Hyper parameters
input_neurons = features.shape[1]
hidden_neurons = 13
output_neurons = np.unique(labels).size
learning_rate = 0.01
num_epoch = 1000000
assert input_neurons == ATTRIBUTE_NUMBER
assert output_neurons == CLASS_NUMBER

# NN
net = MyModel(input_neurons, hidden_neurons, output_neurons)
loss_func = torch.nn.CrossEntropyLoss()
optimiser = torch.optim.SGD(net.parameters(), lr=learning_rate)

# Cuda
if torch.cuda.is_available():
    features_tensor = features_tensor.cuda()
    labels_tensor = labels_tensor.cuda()
    net.cuda()

all_losses = []
for epoch in range(num_epoch):
    # Perform forward pass: compute predicted y by passing x to the model.
    pred = net(features_tensor)
    # Compute loss
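    # Sketch (assumption): the original snippet stops at the loss computation; the
    # remaining loop body under the usual CrossEntropyLoss / SGD pattern would be:
    loss = loss_func(pred, labels_tensor)
    all_losses.append(loss.item())

    # Backward pass and parameter update.
    optimiser.zero_grad()
    loss.backward()
    optimiser.step()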
def populate():
    session = DBSession()
    model = MyModel(name=u'root', value=55)
    session.add(model)
    session.flush()
    transaction.commit()
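# Usage sketch (assumption): populate() matches the usual Pyramid/SQLAlchemy
# initializedb pattern; DBSession and Base are assumed to come from the project's
# models module, and the database URL here is only illustrative.
from sqlalchemy import create_engine

def initialize_db(db_url="sqlite:///mymodel.sqlite"):
    engine = create_engine(db_url)
    DBSession.configure(bind=engine)   # bind the scoped session to the engine
    Base.metadata.create_all(engine)   # create the tables, including MyModel's
    populate()                         # insert the initial 'root' row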
parser.add_argument('image_path', action="store",
                    type=str, help='flower image filepath')
parser.add_argument('checkpoint', action="store", type=str,
                    help='model checkpoint file, eg: checkpoint.pth')
parser.add_argument('--top_k', action="store", type=int,
                    help='topk return', default=5)
parser.add_argument('--category_names', action="store", type=str,
                    help='category name json file', default="cat_to_name.json")
parser.add_argument('--gpu', action='store_true',
                    help='use gpu, if not specified, will use cpu', default=False)

args = parser.parse_args()

mm = MyModel(category_name_filepath=args.category_names, use_gpu=args.gpu)
model, optimizer = mm.load_checkpoint(args.checkpoint)
answer = mm.predict(args.image_path, model, topk=args.top_k)

print("Predictions: ")
print(answer)
from pypdevs.simulator import Simulator
from mymodel import MyModel

model = MyModel()
simulator = Simulator(model)
simulator.setVerbose()
simulator.setClassicDEVS()
simulator.simulate()
from modeller import *
from modeller.automodel import *
from modeller.parallel import *
from myloop import MyLoop
from mymodel import MyModel

log.verbose()
env = environ()
env.io.atom_files_directory = ['.', '../atom_files']

# Create a new class based on 'loopmodel' so that we can redefine
# select_loop_atoms
a = MyModel(env,
            alnfile='alignment.ali',        # alignment filename
            knowns=('bare_protein'),        # codes of the templates
            sequence='R_extended',          # code of the target
            assess_methods=assess.DOPE)     # assess each model with DOPE
# a = MyLoop(env,
#            alnfile='alignment.ali',           # alignment filename
#            knowns=('pdbfile'),                # codes of the templates
#            sequence='rdrp',                   # code of the target
#            loop_assess_methods=assess.DOPE)   # assess each loop with DOPE

a.starting_model = 1    # index of the first model
a.ending_model = 500    # index of the last model
# a.loop.starting_model = 1            # First loop model
# a.loop.ending_model = 10             # Last loop model
# a.loop.md_level = refine.very_slow   # Loop model refinement level
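# Sketch (assumption): the snippet is cut off before the model building is started;
# Modeller automodel scripts normally end by calling make() on the configured object.
a.make()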
def build_model(self):
    self.model = MyModel()
    self.model.compile(
        optimizer=tf.keras.optimizers.Adam(self.learning_rate),
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
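# Usage sketch (assumption): once compiled, the model would typically be trained like
# this; `x_train`, `y_train`, `x_val`, `y_val`, and `self.epochs` are hypothetical and
# stand in for whatever data pipeline the surrounding class provides. Labels are
# expected to be one-hot encoded to match CategoricalCrossentropy.
def train(self, x_train, y_train, x_val, y_val):
    history = self.model.fit(
        x_train, y_train,
        validation_data=(x_val, y_val),
        epochs=self.epochs,
        batch_size=32)
    return history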
parser.add_argument('--epochs', action='store', type=int,
                    help='how many epochs to train', default=1)
parser.add_argument('--learning_rate', action='store', type=float,
                    help='learning rate', default=0.001)
parser.add_argument('--hidden_units', action='store', type=int,
                    help='hidden unit', default=4096)
parser.add_argument('--save_dir', action='store', type=str,
                    help='save model into file', default='.')

args = parser.parse_args()

mm = MyModel(use_gpu=args.gpu, epochs=args.epochs)
model = mm.create_model(base_model=args.arch, hidden_unit_num=args.hidden_units)
optimizer = mm.create_optimizer(model, learning_rate=args.learning_rate)
mm.train_model(model, optimizer)
mm.save_checkpoint(model, optimizer)
if args.save_dir is not None:
    mm.save_checkpoint(model, optimizer, dir=args.save_dir)
def run_LOOCV(self):
    # Cross-validation testing.
    print "[Trainer-LOOCV] Start leave-one-out testing."
    loo = LeaveOneOut()
    split_samples = SplitImportDataset(self.train_.base_)
    true_labels = []
    predicted_labels = []
    for train_index, [test_index] in loo.split(split_samples.input_base_):
        print "[Trainer-LOOCV] Test: [{}], Train: {}.".format(
            test_index, train_index)

        # Initialize model to train
        model_loocv = MyModel(self.unit_)
        if self.gpu_id_ >= 0:
            cuda.get_device_from_id(self.gpu_id_).use()
            model_loocv.to_gpu(self.gpu_id_)

        split_samples.split_LOOCV(train_index)
        train_iter = chainer.iterators.SerialIterator(
            split_samples, batch_size=self.batch_size_, shuffle=False)

        # Set optimizer
        optimizer = chainer.optimizers.AdaDelta()
        optimizer.setup(model_loocv)

        # Set up updater and trainer
        updater = training.StandardUpdater(train_iter, optimizer, device=self.gpu_id_)
        trainer = training.Trainer(updater, (self.epoch_, "epoch"),
                                   os.path.join(home_dir, "result"))

        # Run trainer
        trainer.run()

        # Testing
        clf = Chainer2Sklearn(model_loocv.to_cpu())
        X_test = [list(split_samples.input_base_[test_index][0])]
        true_label = int(split_samples.input_base_[test_index][1])
        predicted_label = int(clf.predict(X_test))
        true_labels.append(true_label)
        predicted_labels.append(predicted_label)

    # Calculate accuracy and variance
    acc = accuracy_score(true_labels, predicted_labels)
    val = self.variance_score(acc, len(split_samples.input_base_))
    print "[Trainer-LOOCV] Accuracy (average): {}, Variance: {}.".format(
        acc, val)

    # Save as figure.
    if (self.train_.iter_num_ - 1) == 1:
        if os.path.exists(os.path.join(home_dir, "acc_val.npy")):
            os.remove(os.path.join(home_dir, "acc_val.npy"))
        acc_val = np.array([[0.0], [0.0]])
    else:
        acc_val = np.load(os.path.join(home_dir, "acc_val.npy"))
    self.acc_val_ = np.append(acc_val, [[acc], [val]], axis=1)
    np.save(os.path.join(home_dir, "acc_val.npy"), self.acc_val_)

    # Draw reliability graph.
    self.drawGraph()