def post(self):
    """Create a book record from the JSON request body.

    Expects the JSON keys: isbn, name, description, price, writer,
    handle, password. The handle/password pair must match the admin
    credentials in ``utils``. Always returns a ``utils.return_response``
    message describing the outcome.
    """
    # silent=True yields None (instead of raising) on a missing/invalid
    # JSON body; fall back to an empty dict so validation below handles it.
    data = request.get_json(silent=True) or {}

    # Reject early if any required field is absent, instead of letting a
    # bare data['key'] lookup raise KeyError (which surfaces as HTTP 500).
    required = ("isbn", "name", "description", "price", "writer",
                "handle", "password")
    missing = [key for key in required if key not in data]
    if missing:
        return utils.return_response(
            message="missing fields: {}".format(", ".join(missing)))

    # Constant-time comparison avoids leaking credential information
    # through timing differences.
    import hmac
    authorized = (
        hmac.compare_digest(str(data["handle"]), str(utils.admin_handle))
        and hmac.compare_digest(str(data["password"]),
                                str(utils.admin_password))
    )
    if not authorized:
        return utils.return_response(message='authentication error')

    model = BaseModel(isbn=data["isbn"], name=data["name"],
                      description=data["description"], price=data["price"],
                      writer=data["writer"])
    try:
        db.session.add(model)
        db.session.commit()
    except SQLAlchemyError as e:
        # Roll back so the session stays usable for subsequent requests.
        current_app.logger.error(e)
        db.session.rollback()
        return utils.return_response(message='Error in Database')
    else:
        return utils.return_response(message='Data inserted Ok')
def create_model(opt):
    """Instantiate and return the model selected by ``opt.model``.

    Raises:
        ValueError: if ``opt.model`` names no known model.
    """
    # Map names to constructors so only the requested model is built.
    # (The original constructed a BaseModel unconditionally and then
    # discarded it for every non-'Base' choice.)
    constructors = {
        'Base': BaseModel,
        'PATN': PATNTransferModel,
        'CTPS': CTPSModel,
        'CAN': CANModel,
        'DCGAN': DCGANModel,
        'AdaIN': AdaIN,
    }
    try:
        model = constructors[opt.model](opt)
    except KeyError:
        raise ValueError("Model [%s] not recognized." % opt.model)
    print("=> model [{}] was created".format(model.name))
    return model
def create_model(self, params=None):
    """Create the input-output model and store it on ``self.model``.

    Builds a ``BaseModel`` from ``self.p`` (presumably the configured
    parameters/options object — TODO confirm against the enclosing class).

    NOTE(review): ``params`` is accepted but never used in this body;
    kept for interface compatibility with callers.
    """
    self.model = BaseModel(self.p)
#!/usr/bin/python3
"""Reload stored objects, print them, then create and save a new BaseModel."""
from models import storage
from models.base import BaseModel

all_objs = storage.all()
print("-- Reloaded objects --")
# Iterate the mapping's values directly instead of indexing by key.
for obj in all_objs.values():
    print(obj)

print("-- Create a new object --")
my_model = BaseModel()
my_model.name = "Holberton"
my_model.my_number = 89
my_model.save()
print(my_model)
parser.add_argument("--epochs", type=int, default=50, help="train epochs")
parser.add_argument("--batch_size", type=int, default=16, help="batch size")
parser.add_argument("--emb_drop_rate", type=float, default=0.2, help="dropout rate for embeddings")
# Fixed copy-pasted help text: this flag controls RNN dropout, not embeddings.
parser.add_argument("--rnn_drop_rate", type=float, default=0.5, help="dropout rate for RNN layers")
parser.add_argument("--max_to_keep", type=int, default=1, help="maximum trained model to be saved")
parser.add_argument("--no_imprv_tolerance", type=int, default=None, help="no improvement tolerance")
config = Configurations(parser.parse_args())

# os environment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
# os.environ values must be strings; str() guards against an int gpu index.
os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu_idx)

# if dataset is not prepared, then build it
if not os.path.exists(config.save_path) or not os.listdir(config.save_path):
    process_base(config)

print("load dataset...")
# train_ratio > 1.0 is an absolute example count, otherwise a fraction;
# convert once instead of calling float() twice.
ratio = float(config.train_ratio)
train_ratio = int(ratio) if ratio > 1.0 else ratio
dataset = Dataset(config.train_set, config.dev_set, config.test_set,
                  batch_size=config.batch_size, train_rate=train_ratio,
                  shuffle=True)

print("build model and train...")
model = BaseModel(config)
if config.restore_model:
    model.restore_last_session()
if config.train:
    model.train(dataset)
    model.restore_last_session()
model.evaluate_data(dataset.get_data_batches("test"), name="test")
model.close_session()