Example #1
    def _add_student(self):
        n = int(input('Enter n: '))
        for i in range(n):
            print("Enter details for Student {}".format(i + 1))
            _id = input('Enter ID: ')
            name = input('Enter Name: ')
            add = input('Enter Address: ')
            sex = input('Enter Sex: ')
            math = float(input('Enter Math score: '))
            phy = float(input('Enter Physics score: '))
            chem = float(input('Enter Chemistry score: '))
            student = Model(_id, name, add, sex, math, phy, chem)
            std_dic = {
                'gender': sex,
                'add': add,
                'scores': {
                    'chemistry': chem,
                    'physics': phy,
                    'math': math
                },
                'id': _id,
                'name': name
            }
            self._tmp.append(str(std_dic))
        return self._tmp
Example #2
def simplify(model, min_lat, max_lat, delta_lat, min_lon, max_lon, delta_lon,
             min_z, max_z, delta_z):
    """Resample a model's parameters onto a new regular grid."""

    # construct new grid
    grid_latitudes, grid_longitudes, grid_depths = np.mgrid[
        min_lat:max_lat:(max_lat - min_lat + 1) / delta_lat * 1j,
        min_lon:max_lon:(max_lon - min_lon + 1) / delta_lon * 1j,
        min_z:max_z:(max_z - min_z + 1) / delta_z * 1j]

    grid = np.array((grid_latitudes.flatten(), grid_longitudes.flatten(),
                     grid_depths.flatten())).T

    # interpolate the parameters onto the new grid
    parameters_grid = model.getParameters(grid).T

    # remove values undefined (NaN) in any parameter
    bool_nan = [np.isnan(parameter_grid) for parameter_grid in parameters_grid]
    out_of_range = np.logical_or.reduce(bool_nan)

    latitudes = grid_latitudes.flatten()[~out_of_range]
    longitudes = grid_longitudes.flatten()[~out_of_range]
    depths = grid_depths.flatten()[~out_of_range]

    parameters_in_range = [
        parameter_grid[~out_of_range] for parameter_grid in parameters_grid
    ]
    p1, p2, p3, p4, p5, p6 = parameters_in_range

    return Model(latitudes, longitudes, depths, p1, p2, p3, p4, p5, p6)
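A note on the np.mgrid call above: a complex "step" such as 5j is a point count, not a stride, and the endpoint is included. A minimal self-contained sketch of the pattern:

import numpy as np

# Complex steps in np.mgrid request a number of points, endpoint included.
lat, lon = np.mgrid[0:10:5j, 0:1:3j]
print(lat.shape)   # (5, 3)
print(lat[:, 0])   # [ 0.   2.5  5.   7.5 10. ]

# Flatten and stack into an (N, 2) list of grid points, as simplify() does.
grid = np.array((lat.flatten(), lon.flatten())).T
print(grid.shape)  # (15, 2)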
Example #3
def main(argv):
    fps: int = 24
    res = (1280, 720)

    try:
        classification_rate = argv[1]
        classification_rate = int(classification_rate)
        mode = str(argv[2])
        weights = int(argv[3])
        threshold = float(argv[4])
        ai = Ai(classification_rate, mode, weights, threshold)
    except (TypeError, ValueError, IndexError):
        ai = Ai()

    global path
    try:
        path = argv[5]  # camera source follows the four Ai arguments
        path = int(path)
        cam = Cam(path)
    except IndexError:
        cam = Cam()
    except ValueError:
        cam = Cam(path)
    model = Model(cam, ai, fps, res)
    view = DebugGUI(cam, model)

    Controller(model, ai, view)

    model.start()
    os.kill(os.getpid(), 1)  # signal 1 is SIGHUP on Unix: force the process to exit

Example #4
def create_model(**kwargs):
    model = Model(**kwargs)
    model.add(number_of_neurons=6)
    model.add(number_of_neurons=3)
    model.add(number_of_neurons=1)

    return model
Example #5
    def __init__(self):
        """Initialize the object.

        First part of two-part initialization.
        The initialization done here should be low risk - we need the GUI to
        be built before we can show error messages.
        """
        self.model = Model()

        # Create the panels by instantiating each of the tabs. Note the order
        # in the list is the tab order in the GUI.
        self.about = About(self)
        self.managedata = ManageData(self)
        self.runforecast = RunForecast(self)
        self.forecastbytime = ForecastByTime(self)
        self.forecastdistribution = ForecastDistribution(self)
        self.forecastbygeography = ForecastByGeography(self)
        self.forecastbystate = ForecastByState(self)
        self.pollviewer = PollViewer(self)

        self.panels = [
            self.about, self.managedata, self.runforecast, self.forecastbytime,
            self.forecastdistribution, self.forecastbygeography,
            self.forecastbystate, self.pollviewer
        ]

        # Create tabs, note the order here is the display order.
        self.tabs = Tabs(tabs=[p.panel for p in self.panels])
Example #6
def train(config_file, save_dir, model_path):
    with open(config_file, 'r') as configs:
        config = yaml.load(configs, Loader=yaml.Loader)['train']
    pprint(config)

    strategy = tf.distribute.MirroredStrategy()
    batch_size = \
        config['batch_size_per_replica'] * strategy.num_replicas_in_sync

    dataset_builder = DatasetBuilder(**config['dataset_builder'])
    train_ds = dataset_builder.build(config['train_ann_paths'], batch_size,
                                     True)
    val_ds = dataset_builder.build(config['val_ann_paths'], batch_size, False)

    model_class = Model(config['dataset_builder']['img_shape'],
                        dataset_builder.num_classes)
    model = model_class.build()
    model.compile(optimizer=tf.keras.optimizers.Adam(config['learning_rate']),
                  loss=CTCLoss(),
                  metrics=[SequenceAccuracy()])

    if config['restore']:
        model.load_weights(config['restore'], by_name=True, skip_mismatch=True)

    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(model_path),
        tf.keras.callbacks.ReduceLROnPlateau(**config['reduce_lr']),
        XTensorBoard(log_dir=str(save_dir), **config['tensorboard'])
    ]

    model.fit(train_ds,
              epochs=config['epochs'],
              callbacks=callbacks,
              validation_data=val_ds)
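The config schema is not shown in the snippet; a minimal YAML file consistent with the keys train() reads might look like this (hypothetical values throughout):

import yaml

# Hypothetical config matching the keys read above; values are placeholders.
config_text = """
train:
  batch_size_per_replica: 32
  learning_rate: 0.001
  epochs: 20
  restore: ''
  train_ann_paths: [data/train_annotations.txt]
  val_ann_paths: [data/val_annotations.txt]
  dataset_builder: {img_shape: [32, 280, 3]}
  reduce_lr: {factor: 0.5, patience: 3}
  tensorboard: {}
"""
config = yaml.safe_load(config_text)['train']
print(config['batch_size_per_replica'])  # 32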
Example #7
def main(args, defaults):
    parameters = process_args(args, defaults)
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s',
        filename=parameters.log_path)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        model = Model(phase=parameters.phase,
                      visualize=parameters.visualize,
                      data_path=parameters.data_path,
                      data_base_dir=parameters.data_base_dir,
                      output_dir=parameters.output_dir,
                      batch_size=parameters.batch_size,
                      initial_learning_rate=parameters.initial_learning_rate,
                      num_epoch=parameters.num_epoch,
                      steps_per_checkpoint=parameters.steps_per_checkpoint,
                      target_vocab_size=parameters.target_vocab_size,
                      model_dir=parameters.model_dir,
                      target_embedding_size=parameters.target_embedding_size,
                      attn_num_hidden=parameters.attn_num_hidden,
                      attn_num_layers=parameters.attn_num_layers,
                      load_model=parameters.load_model,
                      valid_target_length=float('inf'),
                      gpu_id=parameters.gpu_id,
                      use_gru=parameters.use_gru,
                      session=sess)
        model.launch()
Example #8
    def __init__(self, comm, id, reload=False):
        threading.Thread.__init__(self)
        self.comm = comm
        self.slave_id = id
        self.config = FuzzerConfiguration()
        self.q = qemu(id,
                      self.comm.files[2],
                      self.comm.qemu_socket_prefix,
                      config=self.config)
        self.model = Model(self)
        self.comm.register_model(self.slave_id, self.model)
        self.state = SlaveState.WAITING
        self.payload_sem = threading.BoundedSemaphore(value=1)
        self.payload_sem.acquire()
        self.idx_sem = threading.BoundedSemaphore(value=1)
        self.idx_sem.acquire()
        self._stop_event = threading.Event()
        self.bitmap_size = self.config.config_values['BITMAP_SHM_SIZE']
        self.bitmap_filename = self.comm.files[2] + str(self.slave_id)
        self.comm.slave_locks_bitmap[self.slave_id].acquire()
        self._qemu_ready = False

        self.reproduce = self.config.argument_values['reproduce']

        self.globalmodel = None
        if self.reproduce:
            self.globalmodel = GlobalModel(self.config)

        # Grab the lock during initialization
        if self.slave_id < len(self.comm.concolic_locks):
            self.comm.concolic_locks[self.slave_id].acquire()
            log_slave("concolic locked", self.slave_id)
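The BoundedSemaphore(value=1) followed by an immediate acquire() turns each semaphore into a one-shot signal: the slave thread blocks on its next acquire() until another thread calls release(). A self-contained sketch of that pattern:

import threading
import time

sem = threading.BoundedSemaphore(value=1)
sem.acquire()  # drain it so the next acquire() blocks

def worker():
    sem.acquire()  # blocks until the main thread releases
    print("payload received")

t = threading.Thread(target=worker)
t.start()
time.sleep(0.1)
sem.release()      # wake the worker exactly once
t.join()

Example #9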
def train(data):
    print('Training model...')
    save_data_setting(data)
    model = Model(data).to(device)
    optimizer = optim.RMSprop(model.parameters(), lr=data.lr, momentum=data.momentum)
    for epoch in range(data.epoch):
        print('Epoch: %s/%s' % (epoch, data.epoch))
        optimizer = lr_decay(optimizer, epoch, data.lr_decay, data.lr)
        total_loss = 0
        random.shuffle(data.ids)
        model.train()
        model.zero_grad()
        train_num = len(data.ids)
        total_batch = train_num // data.batch_size + 1
        for batch in trange(total_batch):
            start, end = slice_set(batch, data.batch_size, train_num)
            instance = data.ids[start:end]
            if not instance: continue
            *model_input, _ = load_batch(instance)
            loss = model.neg_log_likelihood_loss(*model_input)
            total_loss += loss.data.item()
            loss.backward()
            optimizer.step()
            model.zero_grad()
        print('Epoch %d loss = %.3f' % (epoch, total_loss))
    torch.save(model.state_dict(), data.model_path)
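The lr_decay helper used above is not shown; a common implementation for this style of tagger (an assumption, not necessarily the author's) rescales the optimizer's learning rate each epoch:

def lr_decay(optimizer, epoch, decay_rate, init_lr):
    # Shrink the learning rate as 1 / (1 + decay_rate * epoch).
    lr = init_lr / (1 + decay_rate * epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer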
Example #10
    def train(self):
        data_hdfs = self.spark_session.read.csv(self.data_path)
        dataset = np.array(data_hdfs.select("_c5").collect())
        print("dataset.shape: ", dataset.shape)

        # scale the dataset
        dataset = self.scaler.fit_transform(dataset)

        # split the data into train and test sets
        train = dataset[:self.train_samples]
        test = dataset[self.train_samples:]

        # prepare the data for training
        x_train, y_train = self.get_data(train, self.look_back)
        x_test, y_test = self.get_data(test, self.look_back)
        x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
        x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
        print("x_train.shape: ", x_train.shape)
        print("x_test.shape: ", x_test.shape)

        print("TRAINING MODEL")
        model = Model().lstm_basic(x_train.shape[1])
        model.fit(x_train,
                  y_train,
                  epochs=5,
                  batch_size=1,
                  validation_data=(x_test, y_test))

        print("EVALUATE MODEL")
        model.evaluate(x_test, y_test, batch_size=1)

        # save the model
        model.save(self.model_path)
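get_data is not shown; a typical look_back windowing helper for this kind of LSTM input (a sketch, assuming the usual convention) is:

import numpy as np

def get_data(series, look_back):
    # Build (samples, look_back) input windows and their next-step targets.
    x, y = [], []
    for i in range(len(series) - look_back):
        x.append(series[i:i + look_back, 0])
        y.append(series[i + look_back, 0])
    return np.array(x), np.array(y)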
Example #11
    def setup_train(self):

        self.model = Model()
        self.trainer = torch.optim.Adam(self.model.parameters(), lr=config.lr)

        start_iter = 0

        if self.opt.load_model is not None:
            load_model_path = os.path.join(config.saved_model_path,
                                           self.opt.load_model)
            checkpoint = torch.load(load_model_path)

            start_iter = checkpoint['iter']
            self.model.load_state_dict(checkpoint['model_dict'])
            self.trainer.load_state_dict(checkpoint['trainer_dict'])

            logging.debug('loaded model from: %s', load_model_path)

        if self.opt.new_lr is not None:
            self.trainer = torch.optim.Adam(self.model.parameters(),
                                            lr=self.opt.new_lr)

            logging.debug('updated the lr to: %s', self.opt.new_lr)

        return start_iter
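The matching save side for this checkpoint layout is not shown; it would presumably mirror the keys read in setup_train():

import torch

def save_checkpoint(model, trainer, iteration, save_path):
    # Mirror of the 'iter' / 'model_dict' / 'trainer_dict' keys loaded above.
    torch.save({
        'iter': iteration,
        'model_dict': model.state_dict(),
        'trainer_dict': trainer.state_dict(),
    }, save_path)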
Example #12
 def __init__(self):
     self.layout = Layout(Const.WORLD)
     Display.initGraphics(self.layout)
     self.model = Model(self.layout)
     self.carChanges = {}
     self.errorCounter = Counter()
     self.consecutiveLate = 0
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('dir', help='Path to output directory')
    parser.add_argument('--t', '-test', action='store_true', help='test')

    args = parser.parse_args()
    print('loading data')
    df = pd.read_csv('data/test.csv' if args.t else 'data/train.csv',
                     delimiter=',')
    value = 3  # hard-coded model selector: 1 = Model, 2 = LGBMModel, else GruAttModel
    if value == 1:
        model = Model()
        dp = model.DP()
        df_processed = dp.process(df, args.t)
    elif value == 2:
        model = LGBMModel()
        dp = model.DP()
        df_processed = dp.process(df, args.t)
    else:
        model = GruAttModel()
        dp = model.DP()
        df_processed = dp.process(df, args.t)

    if not args.t:
        model.train(df_processed)
    else:
        df = model.test(df_processed)
        sample_submission = pd.read_csv('data/sample_submission.csv')
        if not os.path.isdir(args.dir):
            os.mkdir(args.dir)
        sample_submission['project_is_approved'] = df['project_is_approved']
        sample_submission.to_csv(os.path.join(args.dir, 'result.csv'), index=False)
Example #14
def main():
    if len(sys.argv) < 3:
        print('Usage: {0} data_dir algorithm_type [output_dir]'.format(
            sys.argv[0]))
        return 1

    dataDirectory = sys.argv[1]
    model = Model(dataDirectory)

    try:
        algorithmTypeStr = sys.argv[2]
        algorithmType = Model.AlgorithmType[algorithmTypeStr]
    except KeyError:
        print("Unknown AlgorithmType: {0}".format(algorithmTypeStr))
        return 1

    start = datetime.now()

    carsUsage, points = model.run(algorithmType)

    end = datetime.now()

    print(carsUsage)

    print_green('Elapsed time (hh:mm:ss.ms): {0}'.format(end - start))

    if len(sys.argv) > 3:
        output_dir = sys.argv[3]
        print_texmap_data(output_dir, carsUsage, points)
Example #15
def main(args):
    device = torch.device('cuda' if args.use_cuda else 'cpu')
    args.sample_rate = {
        '8k': 8000,
        '16k': 16000,
        '24k': 24000,
        '48k': 48000,
    }[args.sample_rate]
    model = Model(
        rnn_layers=args.rnn_layers,
        rnn_units=args.rnn_units,
        win_len=args.win_len,
        win_inc=args.win_inc,
        fft_len=args.fft_len,
        win_type=args.win_type,
        mode=args.target_mode,
    )
    if not args.log_dir:
        writer = SummaryWriter(os.path.join(args.exp_dir, 'tensorboard'))
    else:
        writer = SummaryWriter(args.log_dir)
    model.to(device)
    if not args.decode:
        train(model, args, device, writer)
    reload_for_eval(model, args.exp_dir, args.use_cuda)
    decode(model, args, device)
Example #16
def main(dataset, model, dropout, bias_init, learning_rate, class_weights,
         metrics, epochs, save_path, log_path):
    args = locals()
    os.makedirs(os.path.dirname(log_path), exist_ok=True)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)

    logger = Logger(__name__)
    fd = open(log_path, "a")
    old_fd = sys.stdout
    sys.stdout = fd
    logger.logger.info("Begin")
    print(" ".join([
        "--{} {}".format(
            key,
            str(val) if not isinstance(val, list) else " ".join(map(str, val)))
        for key, val in args.items() if val is not None
    ]))

    dataset = Dataset(dataset)
    dataset.build()

    model = Model(model, dropout, bias_init, class_weights, learning_rate,
                  metrics)
    model.build()

    model.train(dataset.train_gen, dataset.val_gen, epochs, save_path)

    logger.logger.info("End")
    sys.stdout = old_fd
    fd.close()
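Swapping sys.stdout by hand leaks the redirect if training raises before the restore. A safer equivalent of the same idea using the standard library:

import contextlib

# The redirect is undone automatically, even if the body raises.
with open("train.log", "a") as fd, contextlib.redirect_stdout(fd):
    print("this line goes to train.log")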
Example #17
    def __init__(self):
        # API endpoint
        self.dataUrl = 'https://web-api.juejin.im/query'

        # read the configuration file
        config = configparser.ConfigParser()
        config_path = path + '/conf/spider.conf'
        config.read(config_path, encoding='utf-8')
        # get the current environment
        env = config.get('main', 'env')
        mysql_section = 'mysql_' + env
        db = {
            'host': config.get(mysql_section, 'host'),
            'port': int(config.get(mysql_section, 'port')),
            'user': config.get(mysql_section, 'user'),
            'password': config.get(mysql_section, 'pass'),
            'db': config.get(mysql_section, 'db')
        }

        r = {
            'host': config.get('redis', 'host'),
            'port': int(config.get('redis', 'port'))
        }

        # connect to MySQL and Redis
        self.model = Model(db['host'], db['port'], db['user'], db['password'], db['db'])
        self.redis = redis.Redis(r['host'], r['port'])
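A spider.conf consistent with the keys read above might look like this (hypothetical values; only the section and option names come from the snippet):

import configparser

config = configparser.ConfigParser()
config.read_string("""
[main]
env = dev

[mysql_dev]
host = 127.0.0.1
port = 3306
user = root
pass = secret
db = juejin

[redis]
host = 127.0.0.1
port = 6379
""")
print(config.get('mysql_' + config.get('main', 'env'), 'host'))  # 127.0.0.1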
Example #18
 def setUp(self):
     self.M = Model()
     Q = {'q1,∨', 'q2,∨', 'q3,∨', 'q4,∨', 'q5,∨', 'qaccept,∧'}
     for q in Q:
         self.M.add_state(q)
     Sig = {'0'}
     for a in Sig:
         self.M.add_input_symbol(a)
     self.M.change_left_end('|-')
     self.M.change_blank(' ')
     self.M.change_start('q1')
     delta = {
         ('q1', 'q1'): {'|-->|-,R'},
         ('q1', 'q2'): {'0-> ,R'},
         ('q2', 'qaccept'): {' -> ,R'},
         ('q2', 'q2'): {'x->x,R'},
         ('q2', 'q3'): {'0->x,R'},
         ('q3', 'q4'): {'0->0,R'},
         ('q3', 'q3'): {'x->x,R'},
         ('q3', 'q5'): {' -> ,L'},
         ('q4', 'q4'): {'x->x,R'},
         ('q4', 'q3'): {'0->x,R'},
         ('q5', 'q5'): {'x->x,L', '0->0,L'},
         ('q5', 'q2'): {' -> ,R'}
     }
     for p, q in delta:
         for m in delta[(p, q)]:
             self.M.add_transition(p, q, m)
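This transition table appears to encode Sipser's textbook machine for {0^(2^n) : n >= 0}: each rightward sweep crosses off every other 0 (writing x), q5 rewinds the head for the next pass, and the input is accepted only if every sweep halves an even count down to a single 0, with |- as the left-end marker.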
Example #19
def main(args):
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        model = Model(phase='test',
                      visualize=args.visualize,
                      output_dir=args.output_dir,
                      batch_size=args.batch_size,
                      initial_learning_rate=args.initial_learning_rate,
                      steps_per_checkpoint=None,
                      model_dir=args.model_dir,
                      target_embedding_size=args.target_embedding_size,
                      attn_num_hidden=args.attn_num_hidden,
                      attn_num_layers=args.attn_num_layers,
                      clip_gradients=args.clip_gradients,
                      max_gradient_norm=args.max_gradient_norm,
                      session=sess,
                      load_model=True,
                      gpu_id=args.gpu_id,
                      use_gru=args.use_gru,
                      use_distance=args.use_distance,
                      max_image_width=args.max_width,
                      max_image_height=args.max_height,
                      max_prediction_length=args.max_prediction,
                      channels=args.channels)

        model.test(data_path=args.dataset)
Example #20
def train_loop(hp, logger, writer):
    # make dataloader
    logger.info("Making train dataloader...")
    train_loader = create_dataloader(hp, DataloaderMode.train)
    logger.info("Making test dataloader...")
    test_loader = create_dataloader(hp, DataloaderMode.test)

    # init Model
    net_arch = Net_arch(hp)
    loss_f = torch.nn.MSELoss()
    model = Model(hp, net_arch, loss_f)

    if hp.load.resume_state_path is not None:
        model.load_training_state(logger)
    else:
        logger.info("Starting new training run.")

    try:
        for model.epoch in itertools.count(model.epoch + 1):
            if model.epoch > hp.train.num_iter:
                break
            train_model(hp, model, train_loader, writer, logger)
            if model.epoch % hp.log.chkpt_interval == 0:
                model.save_network(logger)
                model.save_training_state(logger)
            test_model(hp, model, test_loader, writer)
        logger.info("End of Train")
    except Exception as e:
        logger.info("Exiting due to exception: %s" % e)
        traceback.print_exc()
Example #21
def run_whole_dataset(X, y, model_file_path):
    import time

    _, nn_accuracy, nn_AUC = load_model(model_file_path).evaluate(
        X, to_categorical_sk(y))

    train_data = DataValues(X=X, y=y)
    # This is never used
    test_data = DataValues(X=X, y=y)

    # Initialise NN Model object
    NN_model = Model(model_path=model_file_path,
                     output_classes=DATASET_INFO.output_classes,
                     train_data=train_data,
                     test_data=test_data,
                     activations_path=TEMP_DIR + 'activations/')

    # Rule Extraction
    start_time = time.time()
    tracemalloc.start()
    rules = RULE_EXTRACTOR.run(NN_model)
    current, peak = tracemalloc.get_traced_memory()
    tracemalloc.stop()
    end_time = time.time()

    # Use rules for prediction
    NN_model.set_rules(rules)

    # Rule extraction time and memory usage
    extraction_time = end_time - start_time
    # tracemalloc reports bytes; convert to MB
    memory = current / 10 ** 6

    return nn_accuracy, nn_AUC, rules, extraction_time, memory
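The time/tracemalloc bracket above is a general pattern for profiling a single call; a minimal self-contained version:

import time
import tracemalloc

start_time = time.time()
tracemalloc.start()
result = sum(range(10 ** 6))  # stand-in for RULE_EXTRACTOR.run(NN_model)
current, peak = tracemalloc.get_traced_memory()  # both reported in bytes
tracemalloc.stop()
print('%.3fs, peak %.1f MB' % (time.time() - start_time, peak / 10 ** 6))

Example #22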
    def __init__(self):
        self.root = Tk.Tk()
        self.model = Model("investment.db")
        self.view = View(self.root)
        self.view.search_field.search_button.config(
            command=self.search_command)
        self.view.buttons.b1_viewall.config(command=self.view_command)
        self.view.buttons.b2_add.config(command=self.add_command)
        self.view.buttons.b3_update.config(command=self.update_command)
        self.view.buttons.b4_delete.config(command=self.delete_command)
        self.view.buttons.b5_close.config(command=self.root.destroy)

        # Builds the listbox with all the entries on init
        self.view.investment_list.build_list(self.model.view())
        self.view.investment_list.investment_list.bind('<<TreeviewSelect>>',
                                                       self.selectedItem)

        # Create a Dataframe for ease of computation
        self.total_df = self.model.invest_dataframe()
        self.invtype_total_df = self.total_df.groupby('investment_type')

        # Dashboard Labels on init
        self.dashboard_summary_labels('Create')

        # Chart on init
        self.view.dashboard.update_chart(
            self.invtype_total_df['principal_amount'].sum(),
            self.invtype_total_df.groups.keys())
Example #23
    def __init__(self, args):
        # Save images
        self.results_dir = args.results_dir

        # Preprocess image
        self.img_height = args.img_height
        self.img_width = args.img_width
        self.crop_size = args.crop_size
        self.labels = get_all_labels(None, args.semantic_label_path)
        self.n_labels = len(self.labels)

        # Use VAE
        self.use_vae = args.use_vae

        # Define Encoder, Generator
        img_shape = [1, args.crop_size, args.crop_size, 3]
        segmap_shape = [1, args.crop_size, args.crop_size, self.n_labels]
        if args.use_vae:
            encoder = Encoder(img_shape,
                              crop_size=args.crop_size,
                              num_filters=args.num_encoder_filters)
        generator = Generator(segmap_shape,
                              num_upsampling_layers=args.num_upsampling_layers,
                              num_filters=args.num_generator_filters,
                              use_vae=args.use_vae)

        # Initialize model
        self.model = Model(args, generator, None, encoder, training=False) if args.use_vae else \
                     Model(args, generator, None, training=False)

        # Define checkpoint-saver
        self.checkpoint = tf.train.Checkpoint(encoder=encoder,
                                              generator=generator) if args.use_vae else \
                          tf.train.Checkpoint(generator=generator)
        self.manager_model = tf.train.CheckpointManager(self.checkpoint,
                                                        args.checkpoint_dir,
                                                        max_to_keep=3)

        # Restore the latest checkpoint in checkpoint_dir
        self.checkpoint.restore(self.manager_model.latest_checkpoint)
        print()
        if self.manager_model.latest_checkpoint:
            INFO("Checkpoint restored from " +
                 self.manager_model.latest_checkpoint)
        else:
            ERROR("No checkpoint was found.")
        print()
Example #24
def updateTestResults(testResults, model, percentage, parameters):
    currentAlg = model.getNNBot().getLearningAlg()
    originalNoise = currentAlg.getNoise()
    clonedModel = cloneModel(model)
    currentAlg.setNoise(0)

    originalTemp = None
    if str(currentAlg) != "AC":
        originalTemp = currentAlg.getTemperature()
        currentAlg.setTemperature(0)
    currentEval = testModel(clonedModel, 5, 15000 if not clonedModel.resetLimit else clonedModel.resetLimit,
                            model.getPath(), "test", False)

    params = Params(0, False, parameters.EXPORT_POINT_AVERAGING)
    pelletModel = Model(False, False, params, False)
    pelletModel.createBot("NN", currentAlg, parameters)
    pelletEval = testModel(pelletModel, 5, 15000, model.getPath(), "pellet", False)

    if parameters.MULTIPLE_BOTS_PRESENT:
        greedyModel = pelletModel
        greedyModel.createBot("Greedy", None, parameters)
        vsGreedyEval = testModel(greedyModel, 5, 20000, model.getPath(), "vsGreedy", False)
    else:
        vsGreedyEval = (0, 0, 0, 0)

    virusGreedyEval = (0, 0, 0, 0)
    virusEval = (0, 0, 0, 0)
    if parameters.VIRUS_SPAWN:
        params = Params(0, True, parameters.EXPORT_POINT_AVERAGING)
        virusModel = Model(False, False, params, False)
        virusModel.createBot("NN", currentAlg, parameters)
        virusEval = testModel(virusModel, 5, 15000, model.getPath(), "pellet_with_virus", False)
        if parameters.MULTIPLE_BOTS_PRESENT:
            virusModel.createBot("Greedy", None, parameters)
            virusGreedyEval = testModel(virusModel, 5, 20000, model.getPath(), "vsGreedy_with_virus", False)


    currentAlg.setNoise(originalNoise)

    if str(currentAlg) != "AC":
        currentAlg.setTemperature(originalTemp)

    meanScore = currentEval[2]
    stdDev = currentEval[3]
    testResults.append((meanScore, stdDev, pelletEval[2], pelletEval[3],
                        vsGreedyEval[2], vsGreedyEval[3], virusEval[2], virusEval[3], virusGreedyEval[2], virusGreedyEval[3]))
    return testResults
Example #25
def main(args):
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    m = Model(args.restore)
    if args.test:
        m.test()
    else:
        m.train()
Example #26
def plot_active_cases(country):
    data.process_data(country)
    model = Model(data.dtf)
    model.forecast()
    model.add_deaths(data.mortality)
    result = Result(model.dtf)
    # render the active-cases panel
    return result.plot_active(model.today)
Example #27
def plot_total_cases(country):
    data.process_data(country)
    model = Model(data.dtf)
    model.forecast()
    model.add_deaths(data.mortality)
    result = Result(model.dtf)
    # render the total-cases panel
    return result.plot_total(model.today)
Example #28
def load_model():

    model = Model()
    model.cuda(0)
    model.load_state_dict(
        torch.load('./model/model.pth', map_location={'cuda:0': 'cuda:0'}))
    model.eval()
    return model
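Note that map_location={'cuda:0': 'cuda:0'} remaps storages from cuda:0 onto cuda:0, i.e. a no-op; the same call with map_location='cpu' is the usual way to load a GPU-trained checkpoint on a CPU-only machine.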
Example #29
def run():
    c = Controller()
    v = ViewCLI(c)
    m = Model(c)
    c.model = m
    c.view = v

    v.start()
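The wiring order matters here: the Controller is constructed first so that both the View and the Model can take it as a constructor argument, and the controller's model and view references are attached afterwards to close the circular dependency.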
Example #30
def get_model(args):
    return Model(embedding_dim=args.embedding_dim,
                 image_size=args.image_size,
                 input_dim=args.input_dim,
                 attribute_dim=args.pos_dim + args.area_dim,
                 refinement_dims=args.refinement_dims if args.gene_layout else None,
                 box_refine_arch=args.box_refine_arch if args.box_refine else None,
                 roi_cat_feature=args.roi_cat_feature)