Example No. 1
    def __init__(self):

        # Setup Parameters
        util.params = None
        self.dnnModelPath = util.getFullFileName(
            util.getParameter('DnnModelPath'))
        self.numTrainingInstances = util.getParameter(
            'NumActivationTrainingInstances')
        self.timestamp = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
        self.outputName = util.getSetupFileDescription() + '--' + self.timestamp
        self.outputDir = 'output/%s' % (self.outputName)
        util.makeDirectory(self.outputDir)
        util.isLoggingEnabled = util.getParameter('LoggingEnabled')
        util.logPath = self.outputDir + '/%s.log' % (self.outputName)
        util.logLevel = util.getParameter('LogLevel')
        util.thisLogger = util.Logger()
        util.storeSetupParamsInLog()

        # Setup memory environment
        self.processorType = util.getParameter('ProcessorType')

        self.startTime = datetime.datetime.now()

        self.streamList = None
        self.clustererList = None
        self.classMaxValues1 = None  # max value of raw activation data
        self.classMaxValues2 = None  # max value of reduced activation data

        self.flatActivations = None
        self.activationBatches = None
        self.batchFlatActivations = None
        self.reducedFlatActivations = None
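
This example configures util module globals (logPath, logLevel, isLoggingEnabled) and then constructs util.Logger() with no arguments. A minimal sketch of a logger compatible with that calling convention, assuming those globals; the project's real class may differ:

import datetime

isLoggingEnabled = True
logPath = 'run.log'
logLevel = 'INFO'

class Logger:
    """Minimal file-backed logger driven by module-level settings (sketch)."""

    def log(self, message):
        if not isLoggingEnabled:
            return
        stamp = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
        with open(logPath, 'a') as f:
            f.write('%s [%s] %s\n' % (stamp, logLevel, message))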
Example No. 2
 def __init__(self, pool=None, echo=False, logger=None, default_ordering=False,
              echo_pool=False, echo_uow=False, convert_unicode=False,
              encoding='utf-8', **params):
     """constructs a new SQLEngine.   SQLEngines should be constructed via the create_engine()
     function which will construct the appropriate subclass of SQLEngine."""
     # get a handle on the connection pool via the connect arguments
     # this ensures the SQLEngine instance integrates with the pool referenced
     # by direct usage of pool.manager(<module>).connect(*args, **params)
     schema.SchemaEngine.__init__(self)
     (cargs, cparams) = self.connect_args()
     if pool is None:
         params['echo'] = echo_pool
         params['use_threadlocal'] = True
         self._pool = sqlalchemy.pool.manage(self.dbapi(), **params).get_pool(*cargs, **cparams)
     elif isinstance(pool, sqlalchemy.pool.DBProxy):
         self._pool = pool.get_pool(*cargs, **cparams)
     else:
         self._pool = pool
     self.default_ordering = default_ordering
     self.echo = echo
     self.echo_uow = echo_uow
     self.convert_unicode = convert_unicode
     self.encoding = encoding
     self.context = util.ThreadLocal()
     self._ischema = None
     self._figure_paramstyle()
     self.logger = logger or util.Logger(origin='engine')
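
The comment in __init__ refers to using the pool module directly. A sketch of that pattern, with sqlite3 as an illustrative DBAPI module; pool.manage() is a legacy SQLAlchemy helper that was removed in modern releases, so treat this as historical usage only:

import sqlite3
import sqlalchemy.pool as pool

proxy = pool.manage(sqlite3)        # DBProxy wrapping the DBAPI module
conn = proxy.connect(':memory:')    # pooled; same args as sqlite3.connect()
conn.close()                        # returns the connection to the pool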
Example No. 3
def train(conf):
    logger = util.Logger(conf)
    if not os.path.exists(conf.checkpoint_dir):
        os.makedirs(conf.checkpoint_dir)

    model_name = conf.model_name
    dataset_name = "ClassificationDataset"
    collate_name = "FastTextCollator" if model_name == "FastText" \
        else "ClassificationCollator"
    train_data_loader, validate_data_loader, test_data_loader = \
        get_data_loader(dataset_name, collate_name, conf)
    empty_dataset = globals()[dataset_name](conf, [], mode="train")
    model = get_classification_model(model_name, empty_dataset, conf)
    loss_fn = globals()["ClassificationLoss"](label_size=len(
        empty_dataset.label_map),
                                              loss_type=conf.train.loss_type)
    optimizer = get_optimizer(conf, model)
    evaluator = cEvaluator(conf.eval.dir)
    trainer = globals()["ClassificationTrainer"](empty_dataset.label_map,
                                                 logger, evaluator, conf,
                                                 loss_fn)

    best_epoch = -1
    best_performance = 0
    model_file_prefix = conf.checkpoint_dir + "/" + model_name
    for epoch in range(conf.train.start_epoch,
                       conf.train.start_epoch + conf.train.num_epochs):
        start_time = time.time()
        trainer.train(train_data_loader, model, optimizer, "Train", epoch)
        trainer.eval(train_data_loader, model, optimizer, "Train", epoch)
        performance = trainer.eval(validate_data_loader, model, optimizer,
                                   "Validate", epoch)
        trainer.eval(test_data_loader, model, optimizer, "test", epoch)
        if performance > best_performance:  # record the best model
            best_epoch = epoch
            best_performance = performance
        save_checkpoint(
            {
                'epoch': epoch,
                'model_name': model_name,
                'state_dict': model.state_dict(),
                'best_performance': best_performance,
                'optimizer': optimizer.state_dict(),
            }, model_file_prefix)
        time_used = time.time() - start_time
        logger.info("Epoch %d cost time: %d second" % (epoch, time_used))

    # best model on validation set
    best_epoch_file_name = model_file_prefix + "_" + str(best_epoch)
    best_file_name = model_file_prefix + "_best"
    shutil.copyfile(best_epoch_file_name, best_file_name)

    load_checkpoint(model_file_prefix + "_" + str(best_epoch), conf, model,
                    optimizer)
    trainer.eval(test_data_loader, model, optimizer, "Best test", best_epoch)
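
save_checkpoint and load_checkpoint are project helpers that are not shown here. From the call sites above one can infer roughly this shape; the file naming is an assumption, not the project's actual code:

import torch

def save_checkpoint(state, file_prefix):
    # The loop later reloads "<prefix>_<best_epoch>", so write one file per
    # epoch (assumed naming, inferred from the call sites above).
    torch.save(state, "%s_%d" % (file_prefix, state['epoch']))

def load_checkpoint(file_name, conf, model, optimizer):
    checkpoint = torch.load(file_name)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])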
Example No. 4
def kfold_eval(conf):
    logger = util.Logger(conf)
    model_name = conf.model_name
    dataset_name = "ClassificationDataset"
    collate_name = "FastTextCollator" if model_name == "FastText" \
        else "ClassificationCollator"

    test_dataset = globals()[dataset_name](conf, conf.data.test_json_files)
    collate_fn = globals()[collate_name](conf, len(test_dataset.label_map))
    test_data_loader = DataLoader(
        test_dataset, batch_size=conf.eval.batch_size, shuffle=False,
        num_workers=conf.data.num_worker, collate_fn=collate_fn,
        pin_memory=True)

    empty_dataset = globals()[dataset_name](conf, [])
    model = get_classification_model(model_name, empty_dataset, conf)
    optimizer = get_optimizer(conf, model)
    load_checkpoint(conf.eval.model_dir, conf, model, optimizer)
    model.eval()
    predict_probs = []
    standard_labels = []
    evaluator = cEvaluator(conf.eval.dir)
    for batch in test_data_loader:
        logits = model(batch)
        result = torch.sigmoid(logits).cpu().tolist()
        predict_probs.extend(result)
        standard_labels.extend(batch[ClassificationDataset.DOC_LABEL_LIST])

    # ===== Evaluation =====
    y_test, predictions = [], []
    print(standard_labels)
    for i, j in zip(standard_labels, predict_probs):
        y_test.append(i)
        predictions.append(j)

    pred, actual = take_values(predictions, y_test, conf.eval.threshold,
                               conf.eval.top_k)
    print(pred)
    actual = np.array(actual)
    pred = np.array(pred)

    evaluation_measures = {"Accuracy": accuracy(actual, pred),
                           "Precision": precision(actual, pred),
                           "Recall": recall(actual, pred),
                           "F1 score": f1_scor(actual, pred),
                           "Hamming Loss": hammingLoss(actual, pred),
                           "f-1 Macro": macroF1(actual, pred),
                           "f-1 Micro": microF1(actual, pred),
                           "averagePrecision": averagePrecision(actual, pred)}
    return evaluation_measures
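
take_values() is not shown in this example. Judging from the call site, it most likely binarizes the predicted probabilities using the threshold and keeps at most top_k positive labels per instance. A hypothetical sketch (name and behavior are assumptions):

import numpy as np

def take_values(predictions, y_test, threshold, top_k):
    preds = []
    for probs in predictions:
        probs = np.asarray(probs)
        # A label is predicted positive if its score clears the threshold...
        binary = (probs >= threshold).astype(int)
        # ...but keep at most top_k positives, preferring the highest scores.
        if binary.sum() > top_k:
            keep = np.argsort(probs)[-top_k:]
            binary = np.zeros_like(binary)
            binary[keep] = 1
        preds.append(binary)
    return preds, y_test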
Example No. 5
def evaluate_detector(args):
    """Evaluate directional point detector."""
    args.cuda = not args.disable_cuda and torch.cuda.is_available()
    device = torch.device('cuda:' + str(args.gpu_id) if args.cuda else 'cpu')
    torch.set_grad_enabled(False)

    dp_detector = DirectionalPointDetector(
        3, args.depth_factor, config.NUM_FEATURE_MAP_CHANNEL).to(device)
    if args.detector_weights:
        dp_detector.load_state_dict(torch.load(args.detector_weights))
    dp_detector.eval()

    psdataset = ParkingSlotDataset(args.dataset_directory)
    logger = util.Logger(enable_visdom=args.enable_visdom)

    total_loss = 0
    position_errors = []
    direction_errors = []
    ground_truths_list = []
    predictions_list = []
    for iter_idx, (image, marking_points) in enumerate(psdataset):
        ground_truths_list.append(marking_points)

        image = torch.unsqueeze(image, 0).to(device)
        prediction = dp_detector(image)
        objective, gradient = generate_objective([marking_points], device)
        loss = (prediction - objective)**2
        total_loss += torch.sum(loss * gradient).item()

        pred_points = get_predicted_points(prediction[0], 0.01)
        predictions_list.append(pred_points)

        dists, angles = collect_error(marking_points, pred_points,
                                      config.CONFID_THRESH_FOR_POINT)
        position_errors += dists
        direction_errors += angles

        logger.log(iter=iter_idx, total_loss=total_loss)

    precisions, recalls = util.calc_precision_recall(ground_truths_list,
                                                     predictions_list,
                                                     match_marking_points)
    average_precision = util.calc_average_precision(precisions, recalls)
    if args.enable_visdom:
        logger.plot_curve(precisions, recalls)

    sample = torch.randn(1, 3, config.INPUT_IMAGE_SIZE,
                         config.INPUT_IMAGE_SIZE)
    flops, params = profile(dp_detector, inputs=(sample.to(device), ))
    logger.log(average_loss=total_loss / len(psdataset),
               average_precision=average_precision,
               flops=flops,
               params=params)
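
This script calls logger.log(...) with arbitrary keyword metrics, so the assumed interface is roughly the following. This is a sketch only; the real Logger also supports Visdom plotting via enable_visdom and plot_curve:

class Logger:
    """Keyword-metric logger (assumed interface, illustrative sketch)."""

    def __init__(self, enable_visdom=False, curve_names=None):
        self.enable_visdom = enable_visdom
        self.curve_names = curve_names or []

    def log(self, **metrics):
        # Print "key: value" pairs; a real implementation might also push
        # the values to Visdom curves when enable_visdom is set.
        print('  '.join('%s: %s' % (k, v) for k, v in metrics.items()))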
Example No. 6
def eval(conf):
    logger = util.Logger(conf)
    model_name = conf.model_name
    dataset_name = "ClassificationDataset"
    collate_name = "FastTextCollator" if model_name == "FastText" \
        else "ClassificationCollator"

    test_dataset = globals()[dataset_name](conf, conf.data.test_json_files)
    collate_fn = globals()[collate_name](conf, len(test_dataset.label_map))
    test_data_loader = DataLoader(test_dataset,
                                  batch_size=conf.eval.batch_size,
                                  shuffle=False,
                                  num_workers=conf.data.num_worker,
                                  collate_fn=collate_fn,
                                  pin_memory=True)

    empty_dataset = globals()[dataset_name](conf, [])
    model = get_classification_model(model_name, empty_dataset, conf)
    optimizer = get_optimizer(conf, model.parameters())
    load_checkpoint(conf.eval.model_dir, conf, model, optimizer)
    model.eval()
    is_multi = False
    if conf.task_info.label_type == ClassificationType.MULTI_LABEL:
        is_multi = True
    predict_probs = []
    standard_labels = []
    evaluator = cEvaluator(conf.eval.dir)
    for batch in test_data_loader:
        logits = model(batch)
        if not is_multi:
            result = torch.nn.functional.softmax(logits, dim=1).cpu().tolist()
        else:
            result = torch.sigmoid(logits).cpu().tolist()
        predict_probs.extend(result)
        standard_labels.extend(batch[ClassificationDataset.DOC_LABEL_LIST])
    (_, precision_list, recall_list, fscore_list, right_list,
     predict_list, standard_list) = \
        evaluator.evaluate(
            predict_probs, standard_label_ids=standard_labels, label_map=empty_dataset.label_map,
            threshold=conf.eval.threshold, top_k=conf.eval.top_k,
            is_flat=conf.eval.is_flat, is_multi=is_multi)
    logger.warn(
        "Performance is precision: %f, "
        "recall: %f, fscore: %f, right: %d, predict: %d, standard: %d." %
        (precision_list[0][cEvaluator.MICRO_AVERAGE],
         recall_list[0][cEvaluator.MICRO_AVERAGE],
         fscore_list[0][cEvaluator.MICRO_AVERAGE],
         right_list[0][cEvaluator.MICRO_AVERAGE],
         predict_list[0][cEvaluator.MICRO_AVERAGE],
         standard_list[0][cEvaluator.MICRO_AVERAGE]))
    evaluator.save()
Example No. 7
def explain(args, network, explain_batches, log=None):
    # Logger
    log = log or util.Logger(verbose=args.verbose, flush=True)
    # Explain all predictions
    for batch, y in explain_batches:
        # Renew the computation graph
        dy.renew_cg()
        # Initialize layers
        network.init(test=True, update=False)
        # Trace max matches
        matches = network.max_matches(batch)
        # Print
        for b, (scores, start_pos, end_pos) in enumerate(matches):
            # Retrieve sentence
            sentence = network.dic.string(batch.unpadded_sequences[b])
            # Print sentence
            log("-" * 80)
            log(" ".join(sentence))
            # Print top contributing patterns
            class_weights = network.softmax.W_p.as_array()
            contrib = (class_weights[1] - class_weights[0]) * scores

            def print_pattern(idx):
                """Print a single pattern match"""
                polarity = "positive" if contrib[idx] > 0 else "negative"
                match_str = " ".join([
                    word if pos >= start_pos[idx] and pos <= end_pos[idx] else
                    "_" * len(word) for pos, word in enumerate(sentence)
                ])
                log(f"Pattern {idx} ({polarity})\t"
                    f"{contrib[idx]:.2f}\t{match_str}")

            # log("Top patterns")
            top_contrib = np.abs(contrib).argsort()[-args.n_top_contrib:]
            # for pattern_idx in reversed(top_contrib):
            word_contrib = np.zeros(len(sentence))
            for pattern in top_contrib:
                phrase_slice = slice(start_pos[pattern], end_pos[pattern] + 1)
                word_contrib[phrase_slice] += contrib[pattern]

            pos_str = " ".join([
                word if word_contrib[i] > 0 else "_" * len(word)
                for i, word in enumerate(sentence)
            ])
            log(f"Positive contribution: {pos_str}")
            neg_str = " ".join([
                word if word_contrib[i] < 0 else "_" * len(word)
                for i, word in enumerate(sentence)
            ])
            log(f"Negative contribution: {neg_str}")
Example No. 8
def main(_):
    config = Config(config_file='conf/fasttext_token_char.config')
    predictor = Predictor(config)
    predict_probs = []
    standard_labels = []
    logger = util.Logger(config)
    if not os.path.exists(config.eval.eval_dir):
        os.makedirs(config.eval.eval_dir)
    with codecs.open(config.eval.eval_dir + "/predict.txt",
                     "w",
                     encoding=util.CHARSET) as predict_file:
        texts = []
        for line in codecs.open(config.eval.text_file, "r",
                                encoding='gb18030'):
            line = line.strip("\n")
            texts.append(line)
        batch_size = config.eval.batch_size
        epochs = math.ceil(len(texts) / batch_size)

        for i in range(epochs):
            predicts = predictor.predict(texts[i * batch_size:(i + 1) *
                                               batch_size])
            for k in range(len(predicts)):
                predict_result = "Nil\t0"
                predict = predicts[k]
                line = texts[i * batch_size + k]
                if predict is not None:
                    predict_np = np.array(predict[0], dtype=np.float32)
                    predict_label = predictor.data_processor.id_to_label_map[
                        np.argmax(predict_np)]
                    predict_result = "%s\t%f" % (predict_label,
                                                 np.max(predict_np))
                    predict_probs.append(predict[0])
                    standard_labels.append(line.split("\t")[0])
                predict_file.write(predict_result + "\t" + line + "\n")
    evaluator = Evaluator(config.eval.eval_dir)
    multi_label = config.eval.multi_label
    (precision_list, recall_list, fscore_list,
     standard_list) = evaluator.evaluate(predict_probs, standard_labels,
                                         predictor.data_processor.label_map,
                                         config.eval.threshold, multi_label)
    logger.info(
        "Test performance, precision: %f, recall: %f, f1: %f,  standard: %d" %
        (
            precision_list[0][evaluator.MICRO_AVERAGE],
            recall_list[0][evaluator.MICRO_AVERAGE],
            fscore_list[0][evaluator.MICRO_AVERAGE],
            standard_list[0][evaluator.MICRO_AVERAGE],
        ))
    evaluator.save()
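
The batching arithmetic above covers every text exactly once, with a shorter final batch. For example:

import math

texts = list(range(10))
batch_size = 4
n_batches = math.ceil(len(texts) / batch_size)          # 3
batches = [texts[i * batch_size:(i + 1) * batch_size]   # sizes 4, 4, 2
           for i in range(n_batches)]
assert [len(b) for b in batches] == [4, 4, 2]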
Example No. 9
def psevaluate_detector(args):
    """Evaluate directional point detector."""
    args.cuda = not args.disable_cuda and torch.cuda.is_available()
    device = torch.device('cuda:' + str(args.gpu_id) if args.cuda else 'cpu')
    torch.set_grad_enabled(False)

    dp_detector = DirectionalPointDetector(
        3, args.depth_factor, config.NUM_FEATURE_MAP_CHANNEL).to(device)
    if args.detector_weights:
        dp_detector.load_state_dict(torch.load(args.detector_weights))
    dp_detector.eval()

    logger = util.Logger(enable_visdom=args.enable_visdom)

    ground_truths_list = []
    predictions_list = []
    for idx, label_file in enumerate(os.listdir(args.label_directory)):
        name = os.path.splitext(label_file)[0]
        print(idx, name)
        image = cv.imread(os.path.join(args.image_directory, name + '.jpg'))
        pred_points = detect_marking_points(dp_detector, image,
                                            config.CONFID_THRESH_FOR_POINT,
                                            device)
        slots = []
        if pred_points:
            marking_points = list(list(zip(*pred_points))[1])
            slots = inference_slots(marking_points)
        pred_slots = []
        for slot in slots:
            point_a = marking_points[slot[0]]
            point_b = marking_points[slot[1]]
            prob = min((pred_points[slot[0]][0], pred_points[slot[1]][0]))
            pred_slots.append(
                (prob, Slot(point_a.x, point_a.y, point_b.x, point_b.y)))
        predictions_list.append(pred_slots)

        with open(os.path.join(args.label_directory, label_file), 'r') as file:
            ground_truths_list.append(get_ground_truths(json.load(file)))

    precisions, recalls = util.calc_precision_recall(ground_truths_list,
                                                     predictions_list,
                                                     match_slots)
    average_precision = util.calc_average_precision(precisions, recalls)
    if args.enable_visdom:
        logger.plot_curve(precisions, recalls)
    logger.log(average_precision=average_precision)
Example No. 10
def instantiate_network(args, dic, log=None):
    """Create the neural network from command line arguments"""
    log = log or util.Logger(verbose=args.verbose, flush=True)
    # Instantiate the network
    network = models.model_from_args(args.model_type, dic, 2, args)
    # Print some info about the number of parameters
    log(f"{network.__class__.__name__} model with:")
    log(f"Total parameters: {num_params(network.pc)}")
    log(f" - word embeddings: {num_params(network.pc, params=False)}")
    log(f" - other: {num_params(network.pc, lookup_params=False)}")
    # Load pretrained word embeddings, if provided
    if args.pretrained_embeds is not None:
        network.load_pretrained_embeddings(args.pretrained_embeds)
        network.freeze_embeds = args.freeze_embeds
    # normalize to unit norm
    if args.normalize_embeds:
        network.normalize_embeddings()
    return network
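
num_params is a helper from the surrounding project. Given how it is called above (with params=False / lookup_params=False toggles), a plausible DyNet-based sketch is the following; this is an assumption, not the actual helper:

import numpy as np

def num_params(pc, params=True, lookup_params=True):
    # Count scalars in a DyNet ParameterCollection, optionally restricted to
    # regular parameters or to lookup (embedding) parameters.
    total = 0
    if params:
        total += sum(int(np.prod(p.shape())) for p in pc.parameters_list())
    if lookup_params:
        total += sum(int(np.prod(p.shape()))
                     for p in pc.lookup_parameters_list())
    return total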
Example No. 11
def train_detector(args):
    """Train directional point detector."""
    args.cuda = not args.disable_cuda and torch.cuda.is_available()
    device = torch.device('cuda:' + str(args.gpu_id) if args.cuda else 'cpu')
    torch.set_grad_enabled(True)

    dp_detector = DirectionalPointDetector(
        3, args.depth_factor, config.NUM_FEATURE_MAP_CHANNEL).to(device)
    if args.detector_weights:
        print("Loading weights: %s" % args.detector_weights)
        dp_detector.load_state_dict(torch.load(args.detector_weights))
    dp_detector.train()

    optimizer = torch.optim.Adam(dp_detector.parameters(), lr=args.lr)
    if args.optimizer_weights:
        print("Loading weights: %s" % args.optimizer_weights)
        optimizer.load_state_dict(torch.load(args.optimizer_weights))

    logger = util.Logger(args.enable_visdom, ['train_loss'])
    data_loader = DataLoader(data.ParkingSlotDataset(args.dataset_directory),
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=args.data_loading_workers,
                             collate_fn=lambda x: list(zip(*x)))

    for epoch_idx in range(args.num_epochs):
        for iter_idx, (images, marking_points) in enumerate(data_loader):
            images = torch.stack(images).to(device)

            optimizer.zero_grad()
            prediction = dp_detector(images)
            objective, gradient = generate_objective(marking_points, device)
            loss = (prediction - objective)**2
            loss.backward(gradient)
            optimizer.step()

            train_loss = torch.sum(loss * gradient).item() / loss.size(0)
            logger.log(epoch=epoch_idx, iter=iter_idx, train_loss=train_loss)
            if args.enable_visdom:
                plot_prediction(logger, images, marking_points, prediction)
        torch.save(dp_detector.state_dict(),
                   'weights/dp_detector_%d.pth' % epoch_idx)
        torch.save(optimizer.state_dict(), 'weights/optimizer.pth')
Example No. 12
    def __init__(self, args, play_file=None, thread_no=0, global_list=None):
        self.args = args
        self.play_file = play_file
        self.current_state = None
        self.thread_no = thread_no
        self.global_list = global_list

        if self.args.screen_order == 'hws':
            self.batch_dimension = (self.args.train_batch_size,
                                    self.args.screen_height,
                                    self.args.screen_width,
                                    self.args.screen_history)
        else:
            self.batch_dimension = (self.args.train_batch_size,
                                    self.args.screen_history,
                                    self.args.screen_height,
                                    self.args.screen_width)

        if self.args.use_color_input:
            self.blank_screen = np.zeros(
                (self.args.screen_height, self.args.screen_width, 3))
        else:
            self.blank_screen = np.zeros(
                (self.args.screen_height, self.args.screen_width))
        self.total_step = 0
        self.epoch_done = 0
        self.next_test_thread_no = 0
        self.train_start = time.strftime('%Y%m%d_%H%M%S')

        if not os.path.exists('output'):
            os.makedirs('output')
        if not os.path.exists('snapshot'):
            os.makedirs('snapshot')

        if self.play_file is None and self.thread_no == 0:
            log_file = "output/%s_%s.log" % (args.game, self.train_start)
            util.Logger(log_file)

        if not os.path.exists(args.snapshot_folder):
            os.makedirs(args.snapshot_folder)

        self.print_env()
        self.initialize_post()
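
On Python 3.2+, the existence checks in __init__ can be collapsed with exist_ok (the same applies to args.snapshot_folder):

import os

os.makedirs('output', exist_ok=True)
os.makedirs('snapshot', exist_ok=True)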
Example No. 13
    def __init__(self, settings):
        self.settings = settings
        self.totalGameNo = settings['total_game_no']
        self.playedGameNo = 0
        self.simStepNo = settings['sim_step_no']
        self.saveStepNo = settings['save_step_no']
        self.display = settings['display']
        self.env = ConnectFourEnv(self.display)
        self.visited = {}           # (stateStr, turn, action), visited
        self.won = {}              # (stateStr, turn, action), won
        self.DRAW = -1
        self.PLAYER = 1
        self.OPP = 2
        self.simpleAgent = SimpleAgent(self.env, self.OPP, self.PLAYER)
        self.winnerResult = {self.DRAW:0, self.PLAYER:0, self.OPP:0}
        self.greedyEpsilon = 0.1

        self.startTime = time.strftime('%Y%m%d_%H%M%S')
        logFile="output/%s.log" % (self.startTime)            
        util.Logger(logFile)

        self.testMode = False
        self.debugger = DebugInput(self).start()
Example No. 14
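    # NOTE: argparse's type=bool is a pitfall -- bool("False") is True, so any
    # value passed on the command line makes --train_ps.verbose truthy.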
    parser.add_argument('--train_ps.verbose', type=bool, default=True)

    parser.add_argument('--train_ps.binary_search', action='store_true')
    parser.add_argument('--train_ps.bnd_type', type=str, default='direct')

    parser.add_argument('--train_ps.T_step', type=float, default=1e-7)
    parser.add_argument('--train_ps.T_end', type=float, default=np.inf)
    parser.add_argument('--train_ps.eps_tol', type=float, default=1.25)

    parser.add_argument('--train_ps.n', type=float, default=5000)
    parser.add_argument('--train_ps.eps', type=float, default=0.01)
    parser.add_argument('--train_ps.delta', type=float, default=1e-5)

    args = parser.parse_args()
    args = util.to_tree_namespace(args)
    args.device = tc.device('cpu') if args.cpu else tc.device('cuda:0')
    args = util.propagate_args(args, 'device')
    args = util.propagate_args(args, 'exp_name')
    args = util.propagate_args(args, 'snapshot_root')

    ## setup logger
    os.makedirs(os.path.join(args.snapshot_root, args.exp_name), exist_ok=True)
    sys.stdout = util.Logger(
        os.path.join(args.snapshot_root, args.exp_name, 'out'))

    ## print args
    util.print_args(args)

    ## run
    main(args)
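
util.to_tree_namespace and util.propagate_args are project helpers. From the dotted option names above, to_tree_namespace plausibly groups "head.leaf" argument names into nested namespaces (so they read as args.train_ps.eps), and propagate_args presumably copies a top-level attribute into each sub-namespace. A hypothetical sketch of the first (assumptions throughout):

import types

def to_tree_namespace(args):
    # Group dotted argparse names like "train_ps.eps" into one level of
    # nested namespaces so they can be read as args.train_ps.eps.
    tree = types.SimpleNamespace()
    for key, value in vars(args).items():
        if '.' in key:
            head, leaf = key.split('.', 1)
            if not hasattr(tree, head):
                setattr(tree, head, types.SimpleNamespace())
            setattr(getattr(tree, head), leaf, value)
        else:
            setattr(tree, key, value)
    return tree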
Example No. 15
def train(conf):
    model_name = conf.model_name
    logger = util.Logger(conf)
    if conf.task_info.weak_pretrain:
        logger.info("Batch Size: " + str(conf.train.batch_size) +
                    " Pretrain Num Epoch: " +
                    str(conf.train.pretrain_num_epochs))
    else:
        logger.info("Batch Size: " + str(conf.train.batch_size))

    if conf.task_info.weak_pretrain and conf.task_info.weak_data_augmentation:
        model_teacher = get_classification_model(model_name, empty_dataset,
                                                 conf)
        if conf.model_name != "BERT":
            optimizer_teacher = get_optimizer(conf, model_teacher)
        else:
            optimizer_teacher = AdamW(model_teacher.parameters(),
                                      lr=5e-2,
                                      eps=1e-2)
        # optimizer_teacher: optimizer for teacher model

    model_target = get_classification_model(model_name, empty_dataset, conf)
    loss_fn = globals()["ClassificationLoss"](label_size=len(
        empty_dataset.label_map),
                                              loss_type=conf.train.loss_type)

    if conf.task_info.weak_pretrain:
        if conf.model_name != "BERT":
            optimizer_weak = get_optimizer(conf, model_target)
        else:
            optimizer_weak = AdamW(model_target.parameters(),
                                   lr=5e-2,
                                   eps=1e-2)
        # optimizer_weak: optimizer for target model pretraining stage
    if conf.model_name != "BERT":
        optimizer_target = get_optimizer(conf, model_target)
    else:
        optimizer_target = AdamW(model_target.parameters(), lr=5e-2, eps=1e-2)
    # optimizer_target: optimizer for target model fine-tuning stage
    evaluator = cEvaluator(conf.eval.dir)

    trainer_target = globals()["ClassificationTrainer"](
        empty_dataset.label_map, logger, evaluator, conf, loss_fn)
    # trainer_target: trainer for target model on fine-tuning stage
    if conf.task_info.weak_pretrain:
        trainer_weak = globals()["ClassificationTrainer"](
            empty_dataset.label_map, logger, evaluator, conf, loss_fn)
        # trainer_weak: trainer for target model on pretraining stage
        if conf.task_info.weak_data_augmentation:
            trainer_teacher = globals()["ClassificationTrainer"](
                empty_dataset.label_map, logger, evaluator, conf, loss_fn)
            # trainer_teacher: trainer for teacher model

    if conf.task_info.weak_data_augmentation:
        best_epoch = -1
        best_performance = 0
        model_file_prefix = conf.checkpoint_dir + "/" + model_name + "_teacher"

        logger.info("Training Teacher Model on Labeled Data")
        for epoch in range(conf.train.start_epoch,
                           conf.train.start_epoch + conf.train.num_epochs):
            start_time = time.time()
            trainer_teacher.train(train_data_loader, model_teacher,
                                  optimizer_teacher, "Train", epoch)
            trainer_teacher.eval(train_data_loader, model_teacher,
                                 optimizer_teacher, "Train", epoch)
            performance = trainer_teacher.eval(validate_data_loader,
                                               model_teacher,
                                               optimizer_teacher, "Validate",
                                               epoch)
            trainer_teacher.eval(test_data_loader, model_teacher,
                                 optimizer_teacher, "Test", epoch)

            if performance > best_performance:  # record the best model
                best_epoch = epoch
                best_performance = performance
                temp_model = model_teacher
                save_checkpoint(
                    {
                        'epoch': epoch,
                        'model_name': model_name,
                        'state_dict': model_teacher.state_dict(),
                        'best_performance': best_performance,
                        'optimizer': optimizer_teacher.state_dict(),
                    }, model_file_prefix)

            time_used = time.time() - start_time
            logger.info("Epoch %d cost time: %d second" % (epoch, time_used))
    best_epoch = -1
    best_performance = 0
    if conf.task_info.weak_pretrain:
        if conf.task_info.weak_data_augmentation:
            unlabeled_data_train_data_loader = select_unlabeled_data(
                temp_model, unlabeled_train_data_loader,
                len(trainer_weak.label_map), conf)

        logger.info("Pretraining on Weak Supervision Data")
        for epoch in range(
                conf.train.start_epoch,
                conf.train.start_epoch + conf.train.pretrain_num_epochs):
            start_time = time.time()
            trainer_weak.train(unlabeled_train_data_loader, model_target,
                               optimizer_weak, "Train", epoch)
            trainer_weak.eval(unlabeled_train_data_loader, model_target,
                              optimizer_weak, "Train", epoch)
            performance = trainer_weak.eval(validate_data_loader, model_target,
                                            optimizer_weak, "Validate", epoch)
            trainer_weak.eval(test_data_loader, model_target, optimizer_weak,
                              "Test", epoch)

            if performance > best_performance:  # record the best model
                temp_model = model_target
            time_used = time.time() - start_time
            logger.info("Epoch %d cost time: %d second" % (epoch, time_used))
        model_target = temp_model

    logger.info("Fine-tuning on Labeled Data")

    best_epoch = -1
    best_performance = 0
    if conf.task_info.weak_pretrain:
        if conf.task_info.weak_data_augmentation:
            model_file_prefix = conf.checkpoint_dir + "/" + model_name + "-Augmentation-" + conf.task_info.Augmentation_Method + "-Pretrain" + str(
                conf.train.pretrain_num_epochs) + "-Batch" + str(
                    conf.train.batch_size)
        else:
            model_file_prefix = conf.checkpoint_dir + "/" + model_name + "-WeakSupervision-" + "-Pretrain" + str(
                conf.train.pretrain_num_epochs) + "-Batch" + str(
                    conf.train.batch_size)
    else:
        model_file_prefix = conf.checkpoint_dir + "/" + model_name + "-Batch" + str(
            conf.train.batch_size)
    for epoch in range(conf.train.start_epoch,
                       conf.train.start_epoch + conf.train.num_epochs):
        start_time = time.time()
        trainer_target.train(train_data_loader, model_target, optimizer_target,
                             "Train", epoch)
        trainer_target.eval(train_data_loader, model_target, optimizer_target,
                            "Train", epoch)
        performance = trainer_target.eval(validate_data_loader, model_target,
                                          optimizer_target, "Validate", epoch)
        trainer_target.eval(test_data_loader, model_target, optimizer_target,
                            "Test", epoch)
        if performance > best_performance:  # record the best model
            best_epoch = epoch
            best_performance = performance
            temp_model = model_target
            save_checkpoint(
                {
                    'epoch': epoch,
                    'model_name': model_name,
                    'state_dict': model_target.state_dict(),
                    'best_performance': best_performance,
                    'optimizer': optimizer_target.state_dict(),
                }, model_file_prefix)
        time_used = time.time() - start_time
        logger.info("Epoch %d cost time: %d second" % (epoch, time_used))

    logger.info("The Best Performance on Validation Data and Test Data")
    #best_epoch_file_name = model_file_prefix + "_" + str(best_epoch)
    #best_file_name = model_file_prefix + "_best"
    #shutil.copyfile(best_epoch_file_name, best_file_name)
    #load_checkpoint(model_file_prefix + "_" + str(best_epoch), conf, model,
    #                optimizer)
    model = temp_model
    trainer_target.eval(train_data_loader, model, optimizer_target,
                        "Best Train", best_epoch)
    trainer_target.eval(validate_data_loader, model, optimizer_target,
                        "Best Validate", best_epoch)
    trainer_target.eval(test_data_loader, model, optimizer_target, "Best Test",
                        best_epoch)
Example No. 16
def main(config):
	device = torch.device(config['device'])

	##### Setup Dirs #####
	experiment_dir = config['path']['experiments'] + config['name']
	util.mkdir_and_rename(experiment_dir)  # rename experiment folder if it exists
	util.mkdirs((experiment_dir+'/sr_images', experiment_dir+'/lr_images'))

	##### Setup Logger #####
	logger = util.Logger('test', experiment_dir, 'test_' + config['name'])

	##### Print Experiment Config #####
	logger.log(util.dict2str(config))
	
	##### Load Dataset #####
	testing_data_loader = dataset.get_test_sets(config['dataset'], logger)

	trainer = create_model(config, logger)
	trainer.print_network_params(logger)

	total_avg_psnr = 0.0
	total_avg_ssim = 0.0

	for name, test_set in testing_data_loader.items():
		logger.log('Testing Dataset {:s}'.format(name))
		valid_start_time = time.time()
		avg_psnr = 0.0
		avg_ssim = 0.0
		idx = 0
		for i, batch in enumerate(test_set):
			idx += 1
			img_name = batch[2][0][batch[2][0].rindex('/')+1:]
			# print(img_name)
			img_name = img_name[:img_name.index('.')]
			img_dir_sr = experiment_dir+'/sr_images'
			img_dir_lr = experiment_dir+'/lr_images'
			util.mkdir(img_dir_sr)
			infer_time = trainer.test(batch)
			visuals = trainer.get_current_visuals()
			lr_img = util.tensor2img(visuals['LR'])
			sr_img = util.tensor2img(visuals['SR'])  # uint8
			gt_img = util.tensor2img(visuals['HR'])  # uint8
			save_sr_img_path = os.path.join(img_dir_sr, '{:s}.png'.format(img_name))
			save_lr_img_path = os.path.join(img_dir_lr, '{:s}.png'.format(img_name))
			util.save_img(lr_img, save_lr_img_path)
			util.save_img(sr_img, save_sr_img_path)
			crop_size = config['dataset']['scale']
			psnr, ssim = util.calc_metrics(sr_img, gt_img, crop_size)
			#logger.log('[ Image: {:s}  PSNR: {:.4f} SSIM: {:.4f} Inference Time: {:.8f}]'.format(img_name, psnr, ssim, infer_time))
			avg_psnr += psnr
			avg_ssim += ssim
		avg_psnr = avg_psnr / idx
		avg_ssim = avg_ssim / idx
		valid_t = time.time() - valid_start_time
		logger.log('[ Set: {:s} Time:{:.3f}] PSNR: {:.2f} SSIM {:.4f}'.format(name, valid_t, avg_psnr, avg_ssim))
		
		iter_start_time = time.time()

		total_avg_ssim += avg_ssim
		total_avg_psnr += avg_psnr

	total_avg_ssim /= len(testing_data_loader)
	total_avg_psnr /= len(testing_data_loader)
	
	logger.log('[ Total Average of Sets ] PSNR: {:.2f} SSIM {:.4f}'.format(total_avg_psnr, total_avg_ssim))
Example No. 17
 def _ready(self):
     self.add_to_group("has_arch")
     self.parent = self.get_parent()
     self.logger = util.Logger()
     self.time = 0.0
Example No. 18
def train(args, network, train_batches, dev_batches, log=None):
    """Estimate model parameters on `train_batches`
    with early stopping on `dev_batches`"""
    # Logger
    log = log or util.Logger(verbose=args.verbose, flush=True)
    # Optimizer
    trainer = dy.AdamTrainer(network.pc, alpha=args.lr)
    # Start training
    log("Starting training")
    best_accuracy = 0
    deadline = 0
    running_nll = n_processed = 0
    report_every = ceil(len(train_batches) / 10)
    # Start training
    for epoch in range(1, args.n_epochs + 1):
        # Time the epoch
        start_time = time.time()
        for batch, y in train_batches:
            # Renew the computation graph
            dy.renew_cg()
            # Initialize layers
            network.init(test=False, update=True)
            # Compute logits
            logits = network(batch)
            # Loss function
            nll = dy.mean_batches(dy.pickneglogsoftmax_batch(logits, y))
            # Backward pass
            nll.backward()
            # Update the parameters
            trainer.update()
            # Keep track of the nll
            running_nll += nll.value() * batch.batch_size
            n_processed += batch.batch_size
            # Print the current loss from time to time
            if train_batches.just_passed_multiple(report_every):
                avg_nll = running_nll / n_processed
                log(f"Epoch {epoch}@{train_batches.percentage_done():.0f}%: "
                    f"NLL={avg_nll:.3f}")
                running_nll = n_processed = 0
        # End of epoch logging
        avg_nll = running_nll / n_processed
        log(f"Epoch {epoch}@100%: NLL={avg_nll:.3f}")
        log(f"Took {time.time()-start_time:.1f}s")
        log("=" * 20)
        # Validate
        accuracy = evaluate(args, network, dev_batches)
        # Print final result
        log(f"Dev accuracy: {accuracy*100:.2f}%")
        # Early stopping
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            dynn.io.save(network.pc, args.model_file)
            deadline = 0
        else:
            if deadline < args.patience:
                dynn.io.populate(network.pc, args.model_file)
                trainer.learning_rate *= args.lr_decay
                deadline += 1
            else:
                log("Early stopping with best accuracy "
                    f"{best_accuracy*100:.2f}%")
                break
    # Load best model
    dynn.io.populate(network.pc, args.model_file)
    return best_accuracy
Example No. 19
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("job", choices=["ps", "worker"])
    parser.add_argument("task", type=int)
    parser.add_argument("--animate", default=False, action='store_true')
    parser.add_argument("--env", default='Pendulum-v0')
    parser.add_argument("--seed", default=12321, type=int)
    parser.add_argument("--tboard", default=False)
    parser.add_argument("--worker_num", default=4, type=int)  #worker jobs
    parser.add_argument("--ps_num", default=2, type=int)  #ps jobs
    parser.add_argument("--initport", default=2849,
                        type=int)  #starting ports for cluster
    parser.add_argument("--stdout_freq", default=20, type=int)
    parser.add_argument("--save_every", default=600, type=int)  #save frequency
    parser.add_argument("--outdir", default=os.path.join(
        'tmp', 'logs'))  # file for the statistics of training
    parser.add_argument("--checkpoint_dir",
                        default=os.path.join(
                            'tmp', 'checkpoints'))  #where to save checkpoint
    parser.add_argument("--frames", default=1,
                        type=int)  #how many recent frames to send to model
    parser.add_argument("--mode",
                        choices=["train", "debug-light", "debug-full"],
                        default="train")  #how verbose to print to stdout
    parser.add_argument(
        "--desired_kl", default=0.002, type=float
    )  # An important param to tune. The learning rate is adjusted when the
    # KL distance falls far above or below desired_kl.
    args = parser.parse_args()

    ANIMATE = args.animate and args.task == 0 and args.job == 'worker'
    INITPORT = args.initport
    CLUSTER = dict()
    workers = []
    ps_ = []
    for i in range(args.ps_num):
        ps_.append('localhost:{}'.format(INITPORT + i))
    for i in range(args.worker_num):
        workers.append("localhost:{}".format(i + args.ps_num + INITPORT))
    CLUSTER['worker'] = workers
    CLUSTER['ps'] = ps_
    LOG_FILE = os.path.join(args.outdir, 'worker_{}.log'.format(
        args.task)) if args.job == 'worker' else 'N/A'
    RANDOM_SEED = args.seed + args.task
    checkpoint_basename = 'model' + '-' + args.env.split('-')[0]

    logger = U.Logger(logfile=LOG_FILE) if args.job == 'worker' else None
    print("Starting {} {} with log at {}".format(args.job, args.task,
                                                 LOG_FILE))
    process_fn(cluster=CLUSTER,
               task_id=args.task,
               job=args.job,
               logger=logger,
               env_id=args.env,
               animate=ANIMATE,
               random_seed=RANDOM_SEED,
               save_path=args.checkpoint_dir,
               stack_frames=args.frames,
               save_every=args.save_every,
               run_mode=args.mode,
               desired_kl=args.desired_kl,
               checkpoint_basename=checkpoint_basename,
               stdout_freq=args.stdout_freq)
Example No. 20
def eval(conf):
    logger = util.Logger(conf)
    model_name = conf.model_name
    dataset_name = "ClassificationDataset"
    collate_name = "FastTextCollator" if model_name == "FastText" \
        else "ClassificationCollator"

    test_dataset = globals()[dataset_name](conf, conf.data.test_json_files)
    collate_fn = globals()[collate_name](conf, len(test_dataset.label_map))
    test_data_loader = DataLoader(
        test_dataset, batch_size=conf.eval.batch_size, shuffle=False,
        num_workers=conf.data.num_worker, collate_fn=collate_fn,
        pin_memory=True)

    empty_dataset = globals()[dataset_name](conf, [])
    model = get_classification_model(model_name, empty_dataset, conf)
    optimizer = get_optimizer(conf, model)
    load_checkpoint(conf.eval.model_dir, conf, model, optimizer)
    model.eval()
    is_multi = False
    if conf.task_info.label_type == ClassificationType.MULTI_LABEL:
        is_multi = True
    predict_probs = []
    standard_labels = []
    evaluator = cEvaluator(conf.eval.dir)
    for batch in test_data_loader:
        with torch.no_grad():
            logits = model(batch)
        if not is_multi:
            result = torch.nn.functional.softmax(logits, dim=1)
        else:
            result = torch.sigmoid(logits)
        result = result.detach().cpu().tolist()
        predict_probs.extend(result)
        standard_labels.extend(batch[ClassificationDataset.DOC_LABEL_LIST])
    if conf.eval.is_flat:
        (_, precision_list, recall_list, fscore_list, right_list,
         predict_list, standard_list, pak_dict, rak_dict, rpak_dict, ndcgak_dict) = \
            evaluator.evaluate(
                predict_probs, standard_label_ids=standard_labels, label_map=empty_dataset.label_map,
                threshold=conf.eval.threshold, top_k=conf.eval.top_k,
                is_flat=conf.eval.is_flat, is_multi=is_multi,
                debug_file_name=conf.eval.debug_file_name,
                is_label_split=conf.data.generate_label_group,
                label_split_json_file=os.path.join(conf.data.dict_dir,
                                                   "{}.json".format(ClassificationDataset.DOC_LABEL_GROUP)),
                instance_remove=conf.eval.instance_remove
            )
        sup_message = ""
        for i in range(1, conf.eval.top_k + 1):
            for group in pak_dict[i]:
                sup_message += "Precision at {} of {} group: {}, ".format(i, group, pak_dict[i][group])
                sup_message += "Recall at {} of {} group: {}, ".format(i, group, rak_dict[i][group])
                sup_message += "R-Precision at {} of {} group: {}, ".format(i, group, rpak_dict[i][group])
                sup_message += "nDCG at {} of {} group: {}, ".format(i, group, ndcgak_dict[i][group])

        message = "Performance is precision: {}, recall: {}, fscore: {}, " + \
                  "macro-fscore: {}, right: {}, predict: {}, standard: {}, "
        logger.warn(message.format(
            precision_list[0][cEvaluator.MICRO_AVERAGE],
            recall_list[0][cEvaluator.MICRO_AVERAGE],
            fscore_list[0][cEvaluator.MICRO_AVERAGE],
            fscore_list[0][cEvaluator.MACRO_AVERAGE],
            right_list[0][cEvaluator.MICRO_AVERAGE],
            predict_list[0][cEvaluator.MICRO_AVERAGE],
            standard_list[0][cEvaluator.MICRO_AVERAGE]) +
            sup_message)
    else:
        (_, precision_list, recall_list, fscore_list, right_list,
         predict_list, standard_list) = \
            evaluator.evaluate(
                predict_probs, standard_label_ids=standard_labels, label_map=empty_dataset.label_map,
                threshold=conf.eval.threshold, top_k=conf.eval.top_k,
                is_flat=conf.eval.is_flat, is_multi=is_multi,
                is_label_split=conf.data.generate_label_group,
                label_split_json_file=os.path.join(conf.data.dict_dir,
                                                   "{}.json".format(ClassificationDataset.DOC_LABEL_GROUP))
            )
        logger.warn(
            "Performance is precision: %f, "
            "recall: %f, fscore: %f, right: %d, predict: %d, standard: %d." % (
                precision_list[0][cEvaluator.MICRO_AVERAGE],
                recall_list[0][cEvaluator.MICRO_AVERAGE],
                fscore_list[0][cEvaluator.MICRO_AVERAGE],
                right_list[0][cEvaluator.MICRO_AVERAGE],
                predict_list[0][cEvaluator.MICRO_AVERAGE],
                standard_list[0][cEvaluator.MICRO_AVERAGE]))
    evaluator.save()
Example No. 21
        elif args.drl == 'a3c_lstm':
            model = ModelA3CLstm(args,
                                 'global',
                                 len(legal_actions),
                                 thread_no=-1)
        elif args.drl == '1q':
            model = Model(args, 'global', len(legal_actions), thread_no=-1)

        global_list = model.prepare_global(args.rms_decay, args.rms_epsilon)
        global_sess = global_list[0]
        global_vars = global_list[1]

        if save_file is not None:  # retrain
            current_time = time.strftime('%Y%m%d_%H%M%S')
            log_file = "output/%s_%s.log" % (args.game, current_time)
            util.Logger(log_file)
            print('Resume training: %s' % save_file)

            for i in range(args.thread_no):
                with open(save_file + '.pickle') as f:
                    player = pickle.load(f)
                    player.train_start = current_time
                    player.thread_no = i
                    if i == 0:
                        player.print_env()
                    player.set_global_list(global_list)
                    player.initialize_post()
                    playerList.append(player)

            model.init_global(global_sess)
Example No. 22
                    type=float,
                    default=0.0006,
                    help='weight decay rate')
parser.add_argument('--epochs', type=int, default=200, help='')
parser.add_argument('--print_every', type=int, default=10, help='')
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--expid', type=int, default=1, help='experiment id')
args = parser.parse_args()

# data path and adjacency matrix path
outflow_path = f'./final_data/{args.city}/outflow{args.tinterval}'
adj_path = f'./final_data/{args.city}/{args.city}_adj_mx.csv'
save_path = f'./experiment/{args.city}/outflow{args.tinterval}'

log_path = pjoin('./result', 'train', args.city, f'{args.tinterval}')
logger = util.Logger(pjoin(log_path, 'test.log'))
logger.write(f'\nTesting configs: {args}')
# use tensorboard to draw the curves.
train_writer = SummaryWriter(
    pjoin('./result', 'train', args.city, f'{args.tinterval}'))
val_writer = SummaryWriter(
    pjoin('./result', 'val', args.city, f'{args.tinterval}'))

num_nodes = 165 if args.city == 'shenzhen' else 81


def main():
    # set seed
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
Example No. 23
def main():
    MAIN_T = time.time()
    p = argparse.ArgumentParser(
        description='Given two images, determine the convolution kernel so that '
        'a * k = b')
    p.add_argument('a', help='input image filename')
    p.add_argument('b', help='expected image filename')
    p.add_argument('k', help='kernel directory')
    p.add_argument(
        '-n',
        type=int,
        default=5,
        help='kernel size is NxN (default: 5, or automatically set to size '
        'of loaded kernel)')
    p.add_argument(
        '-sym',
        type=boolchoice,
        default=True,
        choices=[True, False],
        help='kernel will be symmetric if set to True (default: True)')
    p.add_argument(
        '-gamma',
        type=float,
        default=1.0,
        help='gamma correction to use for images (default: no correction)')
    p.add_argument(
        '-reg_cost',
        type=float,
        default=0.,
        help='regularization cost: the sum of weights is multiplied by this '
        'and added to the cost (default: zero: no regularization)')
    p.add_argument(
        '-border',
        type=int,
        default=-1,
        help='how many pixels to remove from the border (from every edge of the '
        'image) before calculating the difference (default: auto based on '
        'kernel size)')
    p.add_argument('-learn_rate',
                   type=float,
                   default=2.**-10,
                   help='learning rate for the optimizer')
    p.add_argument('-epsilon',
                   type=float,
                   default=.09,
                   help='epsilon for the optimizer')
    p.add_argument(
        '-max_steps',
        type=int,
        default=0,
        help='stop after this many steps (default: zero: never stop)')
    p.add_argument('-log_every',
                   type=int,
                   default=100,
                   help='log stats every N steps (0 to disable)')
    p.add_argument('-save_every',
                   type=int,
                   default=500,
                   help='save kernel and image every N steps (0 to disable)')
    p.add_argument('-crop_x',
                   type=int,
                   default=0,
                   help='crop X offset in pixels, range is [0..width-1]')
    p.add_argument(
        '-crop_y',
        type=int,
        default=0,
        help=
        'crop Y offset in pixels, range is [0..height-1] where 0 is the TOP')
    p.add_argument('-crop_w', type=int, default=0, help='crop width in pixels')
    p.add_argument('-crop_h',
                   type=int,
                   default=0,
                   help='crop height in pixels')
    p.add_argument(
        '-fps',
        type=float,
        default=5,
        help='how often to update the viewer, set to zero to disable viewer')
    args = p.parse_args()

    if not os.path.exists(args.k):
        os.mkdir(args.k)
        step = -1
    else:
        step, w1 = util.load_kernel(args.k)
        args.n = w1.shape[0]

    if step >= args.max_steps and args.max_steps != 0:
        print('Current step %d is over max %d. Exiting.' %
              (step, args.max_steps))
        return 0

    log = util.Logger(args.k + '/log.txt')
    log.log('--- Start of run ---')
    log.log('Cmdline:', sys.argv)

    # Load images.
    img1 = util.load_image(args.a, args)
    img2 = util.load_image(args.b, args)
    assert img1.shape == img2.shape, (img1.shape, img2.shape)
    log.log('Loaded images. Shape is', img1.shape, '(NHWC)')

    vimg1 = util.vis_nhwc(img1, doubles=0, gamma=args.gamma)
    vimg2 = util.vis_nhwc(img2, doubles=0, gamma=args.gamma)

    # Load and initialize weights.
    if step >= 0:
        log.log('Loaded weights, shape is', w1.shape, '(HWIO)')
    else:
        assert step == -1, step
        step = 0
        log.log('Starting with random weights.')
        w1 = np.random.normal(size=(args.n, args.n, 1, 1),
                              scale=.2).astype(np.float32)
        m = args.n // 2
        w1[m, m, 0, 0] = 1.  # Bright middle pixel.
    if args.sym:
        w1 = util.make_symmetric(w1)
    else:
        w1 = tf.Variable(w1)

    if args.border == -1:
        args.border = (args.n + 1) // 2
        log.log('Automatically set border to', args.border)

    log.log('Current args:', args.__dict__)
    log.log('Starting at step', step)

    # Convolution.
    input_img = tf.constant(img1)
    expected_img = tf.constant(img2)
    actual_img = util.convolve(input_img, w1)  # <-- THIS IS THE CALCULATION.

    # Cost.
    diff = util.diff(actual_img, expected_img, args.border)
    diffcost = util.diff_cost(diff)  # L2
    cost = diffcost

    # Regularization.
    reg = util.reg_cost(w1)  # L1
    if args.reg_cost != 0:
        cost += reg * args.reg_cost

    # Optimizer.
    global_step = tf.Variable(step,
                              dtype=tf.int32,
                              trainable=False,
                              name='global_step')
    train_step = tf.train.AdamOptimizer(
        args.learn_rate, args.epsilon).minimize(cost, global_step=global_step)

    log.log('Starting TF session.')
    sess = util.make_session(outdir=args.k)

    # Get ready for viewer.
    log_last_step = [step]
    log_last_time = [time.time()]

    def periodic_log():
        """Does a log.log() of stats like step number and current error."""
        now = time.time()
        rstep, rcost, rreg, rdiffcost = sess.run(
            [global_step, cost, reg, diffcost])
        if log_last_step[0] == rstep: return  # Dupe call.
        log.log(
            'steps',
            rstep,
            'total-cost %.9f' % rcost,
            'diffcost %.9f' % rdiffcost,
            'reg %.9f' % rreg,
            'avg-px-err %.6f' % util.avg_px_err(rdiffcost, args.gamma),
            'steps/sec %.2f' % ((rstep - log_last_step[0]) /
                                (now - log_last_time[0])),
        )
        log_last_step[0] = rstep
        log_last_time[0] = now

    render_time = [0.]

    def render():
        """Returns an image showing the current weights and output."""
        # TODO: vertically align labels.
        t0 = time.time()
        rout, rdiff, rw = sess.run([actual_img, diff, w1])
        render_out = util.vstack([
            util.hstack([
                util.vstack([util.cache_label('input:'), vimg1], 5),
                util.vstack([
                    util.cache_label('actual:'),
                    util.vis_nhwc(rout, doubles=0, gamma=args.gamma)
                ], 5),
                util.vstack([util.cache_label('expected:'), vimg2], 5),
            ], 5),
            util.cache_label('difference:'),
            util.vis_nhwc(rdiff, doubles=0),
            util.cache_label('kernel:'),
            util.vis_hwoi(rw, doubles=2),
        ], 5)
        render_out = util.border(render_out, 5)
        t1 = time.time()
        render_time[0] += t1 - t0
        return render_out

    def periodic_save():
        rstep, rdiffcost, rw = sess.run([global_step, diffcost, w1])
        util.save_kernel(args.k, rstep, rw)
        rfn = args.k + '/render-step%08d-diff%.9f.png' % (rstep, rdiffcost)
        util.save_image(rfn, render())

    calc_time = [0.]

    def calc_fn():
        """Run train_step, then do every-N-steps housekeeping."""
        t0 = time.time()
        sess.run(train_step)  # <--- THIS IS WHERE THE MAGIC HAPPENS.
        t1 = time.time()
        calc_time[0] += t1 - t0
        nsteps = sess.run(global_step)
        if args.log_every != 0:
            if nsteps == 1 or nsteps % args.log_every == 0:
                periodic_log()
        if args.save_every != 0:
            if nsteps % args.save_every == 0:
                periodic_save()
        if args.max_steps == 0:
            return True  # Loop forever.
        return nsteps < args.max_steps

    log.log('Start optimizer.')
    START_T = time.time()
    if args.fps == 0:
        while calc_fn():
            pass
    else:
        util.viewer(calc_fn, render, fps=args.fps, hang=False)
    STOP_T = time.time()
    # Final log and save.
    log.log('Stop optimizer.')
    log.log('Render time %.3fs (%.02f%% of optimizer)' %
            (render_time[0], 100. * render_time[0] / (STOP_T - START_T)))
    periodic_log()
    periodic_save()
    nsteps = sess.run(global_step) - step
    log.log('Steps this session %d, calc time %.3fs (%.02f%% of optimizer)' %
            (nsteps, calc_time[0], 100. * calc_time[0] / (STOP_T - START_T)))
    log.log('Calc steps/sec %.3f, with overhead steps/sec %.3f' %
            (nsteps / calc_time[0], nsteps / (STOP_T - START_T)))
    END_T = time.time()
    log.log('Total time spent: %.3fs' % (END_T - INIT_T))
    for k, v in [
        ('before main', MAIN_T - INIT_T),
        ('setting up', START_T - MAIN_T),
        ('optimizing', STOP_T - START_T),
        ('finishing up', END_T - STOP_T),
    ]:
        log.log(' - time spent %s: %.3fs (%.02f%% of total)' %
                (k, v, 100. * v / (END_T - INIT_T)))
    log.close()
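The helper util.convolve is not shown in this listing. Given the HWIO kernel logged above and the NHWC image tensors, a minimal sketch of what such a helper could look like in TF 1.x (the signature is an assumption, not the original):

import tensorflow as tf

def convolve(img_nhwc, kernel_hwio):
    # Hypothetical stand-in for util.convolve: a stride-1, SAME-padded 2-D
    # convolution, so the output keeps the input's spatial size.
    return tf.nn.conv2d(img_nhwc, kernel_hwio,
                        strides=[1, 1, 1, 1], padding='SAME')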
Example #24
def train_process(opts, training_data, valid_data):
    # Metric calculation and init
    opts.iterations_per_epoch = training_data._size / (opts.batch_size * opts.batches_per_step)
    opts.steps_per_valid_log = math.ceil(opts.iterations_per_epoch / opts.valid_per_epoch)
    opts.iterations = math.ceil(opts.epochs * opts.iterations_per_epoch)
    assert opts.mov_mean_window < opts.iterations, "Choose a moving mean window smaller than the number of iterations. To do all iterations, set to 0"
    lr_scheduler = opts.lr_schedule_type(opts, verbose=True)
    train_logger = util.Logger(opts, mode=util.Modes.TRAIN)
    if not opts.no_validation:
        opts.validation_batches_per_step = valid_data._size // opts.validation_batch_size
        shared_history = multiprocessing.Array('d', opts.iterations)
        val_logger = util.Logger(opts, mode=util.Modes.VALID, history_array=shared_history if opts.multiprocessing else [])

    if opts.multiprocessing:
        process = util.ParallelProcess(target=validation_process, args=(opts, valid_data, lr_scheduler, val_logger))

    # Build and compile training graph
    print("Building training graph")
    train = generic_graph(opts, training_data, util.Modes.TRAIN)
    compile_graph(opts, train)

    # Build and compile validation graph if not in a separate process
    if not opts.no_validation and not opts.multiprocessing:
        valid = validation_process(opts, valid_data)

    # Training loop
    print("Begin training loop")
    for i in range(opts.iterations):
        if not opts.multiprocessing and not opts.no_validation:
            # When interleaving, run a dummy op to load the session onto the IPU before timing throughput
            train.session.run(train.ops, feed_dict={train.placeholders['learning_rate']: 0})
        # Run the graph once
        loss, batch_time = run(train, learning_rate=lr_scheduler.lr, i=i+1)

        # Aggregate and print stats
        train_logger.update(i, batch_time=batch_time, loss=loss)

        # If we're only compiling report, do so and stop at epoch 0
        if i == 0 and opts.compiler_report:
            generate_report(train)
            return

        # Validation on first, last and scheduled steps
        if not opts.no_validation and (i in [0, opts.iterations-1] or not (i+1) % opts.steps_per_valid_log):
            filepath = train.saver.save(train.session, opts.checkpoint_path)
            if opts.multiprocessing:
                process.queue.put((i + 1, filepath))
                time.sleep(0)
            else:
                valid.saver.restore(valid.session, filepath)
                # When interleaving, run a dummy op to load the session onto
                # the IPU before timing throughput (this else-branch already
                # implies interleaved validation).
                valid.session.run(valid.ops)
                val_rmspe, val_batch_time = run(valid, i=i+1)
                val_logger.update(i, batch_time=val_batch_time, batch_acc=val_rmspe)

        # Schedule the learning rate based on val accuracy, but if that's not available, then training loss
        # If we're multiprocessing, then schedule inside the subprocess
        if not opts.multiprocessing:
            lr_scheduler.schedule(loss if opts.no_validation else val_rmspe, i)

    # Clean up
    train.session.close()
    if not opts.no_validation:
        if opts.multiprocessing:
            process.cleanup()
        else:
            valid.session.close()

    # Print best RMSPE
    if not opts.no_validation:
        rmspe_list = [x for x in val_logger.history[:] if x > 0]
        if rmspe_list:
            print(f'Best RMSPE: {min(rmspe_list):6.4f}')
        else:
            print("There have been no valid RMSPE results.")
Example #25
import datetime
import json
import os
import requests
import urllib3

import falcon
import psutil

try:
    import model
    import util
except ModuleNotFoundError:
    print('common package not in python path')

logger = util.Logger(__name__)

CRAWLER_ENDPOINT = 'https://crawler.run-it-down.lol/'
REPORT_ENDPOINT = 'https://reporter.run-it-down.lol/'


class Analyze:
    def on_get(self, req, resp):
        logger.info('/GET analyze')

        params = req.params
        if "summonerName" not in params or "summonerNameBuddy" not in params:
            resp.status = 400
            return

        # suppress TLS certificate verification
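        # The listing is truncated here. Given the urllib3 import above, a
        # plausible continuation (an assumption, not the original code) is:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        crawler_resp = requests.get(CRAWLER_ENDPOINT, params=params,
                                    verify=False)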
Example #26
def main():
    # Handle parameters
    args = util.get_args()

    # Select gpu
    device = torch.device(args.device)
    args.device = device

    # Load data
    train_loader, val_loader, test_loader = util.get_dataloader(args)

    train_dataloader, train_val_dataloader = train_loader
    val_dataloader, val_val_dataloader = val_loader
    test_dataloader, test_val_dataloader = test_loader

    args.train_size, args.nSeries = train_dataloader.dataset.X.shape
    args.val_size, args.val_nSeries = val_dataloader.dataset.X.shape
    args.test_size, args.test_nSeries = test_dataloader.dataset.X.shape

    # Create logger
    logger = util.Logger(args)

    # Display arguments
    util.print_args(args)

    # Create model
    model = models.get_model(args)

    # Create imputation engine
    engine = util.ImpEngine.from_args(model, scaler=None, args=args)

    # Training
    if args.impset == 'train':
        data_loader = train_dataloader
        val_loader = train_val_dataloader
    elif args.impset == 'val':
        data_loader = val_dataloader
        val_loader = val_val_dataloader
    elif args.impset == 'test':
        data_loader = test_dataloader
        val_loader = test_val_dataloader
    else:
        raise NotImplementedError

    if not args.test:
        iterator = trange(args.num_epoch)

        try:
            if os.path.isfile(logger.best_model_save_path):
                print('Model checkpoint exist!')
                print('Load model checkpoint? (y/n)')
                _in = input()
                if _in in ('y', 'yes'):
                    print('Loading model...')
                    engine.model.load_state_dict(
                        torch.load(logger.best_model_save_path))
                else:
                    print('Training new model')

            for epoch in iterator:
                loss = engine.train(data_loader)
                engine.scheduler.step()
                with torch.no_grad():
                    # metrics = (val_loss, rse, mae, mape, mse, rmse)
                    Xhat_val, val_metrics = engine.validation(
                        data_loader, val_loader)

                    m = dict(train_loss=loss,
                             val_loss=val_metrics[0],
                             val_rse=val_metrics[1],
                             val_mae=val_metrics[2],
                             val_mape=val_metrics[3],
                             val_mse=val_metrics[4],
                             val_rmse=val_metrics[5])

                # report stats
                description = logger.summary(m, engine.model)
                if logger.stop:
                    break
                description = 'Epoch: {} '.format(epoch) + description
                iterator.set_description(description)

        except KeyboardInterrupt:
            pass

    # data recovery
    engine.model.load_state_dict(torch.load(logger.best_model_save_path))
    with torch.no_grad():
        # metrics = (rse, mae, mape, mse, rmse)
        imp_X, metrics, metrics_li = engine.imputation(data_loader)

        m = dict(imp_rse=metrics[0],
                 imp_mae=metrics[1],
                 imp_mape=metrics[2],
                 imp_mse=metrics[3],
                 imp_rmse=metrics[4])
        # m_li = dict(imp_rse=metrics_li[0], imp_mae=metrics_li[1], imp_mape=metrics_li[2], imp_mse=metrics_li[3],
        #             imp_rmse=metrics_li[4])

        logger.imputation_summary(m=m,
                                  X=data_loader.dataset.X,
                                  imp_X=imp_X,
                                  W=data_loader.dataset.W,
                                  save_imp=True)
Example #27
def main():
    args = parser.parse_args()
    LOG_FILE = args.outdir
    ANIMATE = args.animate
    DEBUG = (args.mode == "debug")
    CHECKPOINT_PATH = args.checkpoint_dir + '-' + args.env.split('-')[0]
    MAX_ROLLS = 7
    ITER = 5000000
    LOG_ROUND = 10
    env, MAX_PATH_LENGTH, EP_LENGTH_STOP = get_roll_params(args.env)

    desired_kl = args.desired_kl
    max_lr, min_lr = 1., 1e-6

    framer = Framer(frame_num=args.frames)
    log_gamma_schedule = U.LinearSchedule(init_t=100,
                                          end_t=3000,
                                          init_val=-2,
                                          end_val=-8,
                                          update_every_t=100)  #This is base 10
    log_beta_schedule = U.LinearSchedule(init_t=100,
                                         end_t=3000,
                                         init_val=0,
                                         end_val=-4,
                                         update_every_t=100)  #This is base 10
    rew_to_advs = PathAdv(gamma=0.98, look_ahead=40)
    logger = U.Logger(logfile=LOG_FILE)
    np.random.seed(args.seed)
    env.seed(args.seed)

    if type(env.action_space) == gym.spaces.discrete.Discrete:
        act_type = 'disc'
        ac_dim, ac_scale = env.action_space.n, None
        print('Discrete Action Space. Number of actions is {}.'.format(
            env.action_space.n))
    else:
        act_type = 'cont'
        ac_dim, ac_scale = env.action_space.shape[0], np.maximum(
            env.action_space.high, np.abs(env.action_space.low))
        print('Continuous Action Space. Action Scale is {}.'.format(ac_scale))
    ob_dim = env.observation_space.shape[0] * args.frames
    critic = pol.Critic(num_ob_feat=ob_dim)
    actor = pol.Actor(num_ob_feat=ob_dim,
                      ac_dim=ac_dim,
                      act_type=act_type,
                      ac_scale=ac_scale)
    saver = tf.train.Saver(max_to_keep=3)

    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(
        os.path.join('summaries',
                     args.outdir.split('.')[0] + '.data'),
        tf.get_default_graph())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        tot_rolls = 0

        for i in range(ITER):
            ep_obs, ep_advs, ep_logps, ep_target_vals, ep_acs = [], [], [], [], []
            ep_rews = []
            tot_rews, tot_ent, rolls = 0, 0, 0
            while len(ep_rews) < EP_LENGTH_STOP and rolls < MAX_ROLLS:
                path = rollout(env=env,
                               sess=sess,
                               policy=actor.act,
                               max_path_length=MAX_PATH_LENGTH,
                               framer=framer,
                               render=rolls == 0 and i % 20 == 0 and ANIMATE)
                obs_aug = framer.full(path['obs'])
                ep_obs += obs_aug[:-1]
                ep_logps += path['logps']
                ep_acs += path['acs']
                tot_ent += path['entropy']
                obs_vals = critic.value(obs=obs_aug, sess=sess).reshape(-1)
                target_val, advs = rew_to_advs(rews=path['rews'],
                                               terminal=path['terminated'],
                                               vals=obs_vals)
                ep_target_vals += list(target_val)
                ep_advs += list(advs)
                ep_rews += path['rews']
                tot_rews += sum(path['rews'])

                if rolls == 0 and i % 10 == 0 and DEBUG:
                    actor.printoo(obs=ep_obs, sess=sess)
                    critic.printoo(obs=ep_obs, sess=sess)
                    print('Path length %d' % len(path['rews']))
                    print('Terminated {}'.format(path['terminated']))
                rolls += 1

            avg_rew = float(tot_rews) / rolls
            avg_ent = tot_ent / float(len(ep_logps))
            ep_obs, ep_advs, ep_logps, ep_target_vals, ep_acs, ep_rews = U.make_np(
                ep_obs, ep_advs, ep_logps, ep_target_vals, ep_acs, ep_rews)
            # reshape() returns a new array; the result must be reassigned,
            # otherwise the call is a no-op.
            ep_advs = ep_advs.reshape(-1)
            ep_target_vals = ep_target_vals.reshape(-1)
            ep_advs = (ep_advs - np.mean(ep_advs)) / (1e-8 + np.std(ep_advs))

            if i % 50 == 13 and DEBUG:
                perm = np.random.choice(len(ep_advs), size=20)
                print('Some targets', ep_target_vals[perm])
                print('Some preds', critic.value(ep_obs[perm], sess=sess))
                print('Some logps', ep_logps[perm])

            cir_loss, ev_before, ev_after = train_ciritic(
                critic=critic, sess=sess, obs=ep_obs, targets=ep_target_vals)
            act_loss = train_actor(actor=actor,
                                   sess=sess,
                                   obs=ep_obs,
                                   advs=ep_advs,
                                   acs=ep_acs,
                                   logps=ep_logps)

            if args.tboard:
                summ, _, _ = sess.run([merged, actor.ac, critic.v],
                                      feed_dict={
                                          actor.ob: ep_obs,
                                          critic.obs: ep_obs
                                      })
                writer.add_summary(summ, i)
            # Logging
            act_lr, cur_beta, cur_gamma = actor.get_opt_param(sess)
            kl_dist = actor.get_kl(sess=sess,
                                   obs=ep_obs,
                                   logp_feeds=ep_logps,
                                   acs=ep_acs)

            # Update the learning rate based on how far the observed KL
            # distance sits from desired_kl (multiplicative band).
            if kl_dist < desired_kl / 4:
                new_lr = min(max_lr, act_lr * 1.5)
                actor.set_opt_param(sess=sess, new_lr=new_lr)
            elif kl_dist > desired_kl * 4:
                new_lr = max(min_lr, act_lr / 1.5)
                actor.set_opt_param(sess=sess, new_lr=new_lr)

            if log_gamma_schedule.update_time(i):
                new_gamma = np.power(10., log_gamma_schedule.val(i))
                actor.set_opt_param(sess=sess, new_gamma=new_gamma)
                print('\nUpdated gamma from %.4f to %.4f.' %
                      (cur_gamma, new_gamma))
            if log_beta_schedule.update_time(i):
                new_beta = np.power(10., log_beta_schedule.val(i))
                actor.set_opt_param(sess=sess, new_beta=new_beta)
                print('Updated beta from %.4f to %.4f.' % (cur_beta, new_beta))

            logger(i,
                   act_loss=act_loss,
                   circ_loss=np.sqrt(cir_loss),
                   avg_rew=avg_rew,
                   ev_before=ev_before,
                   ev_after=ev_after,
                   act_lr=act_lr,
                   print_tog=(i % 20) == 0,
                   kl_dist=kl_dist,
                   avg_ent=avg_ent)
            if i % 100 == 50:
                logger.flush()

            if i % args.save_every == 0:
                saver.save(sess, CHECKPOINT_PATH, global_step=tot_rolls)
            tot_rolls += rolls

    del logger
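The learning-rate rule in Example #27 widens the step when the policy barely moved (KL far below target) and shrinks it when the update was too aggressive. Pulled out as a standalone helper (the function name is ours; the constants are the example's):

def adapt_lr(kl_dist, desired_kl, lr, max_lr=1., min_lr=1e-6, factor=1.5):
    # Multiplicative adaptation: grow the rate when the observed KL distance
    # falls below desired_kl / 4, shrink it when it exceeds desired_kl * 4.
    if kl_dist < desired_kl / 4:
        return min(max_lr, lr * factor)
    if kl_dist > desired_kl * 4:
        return max(min_lr, lr / factor)
    return lr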
Example #28
    # special rules
    for key, value in [('MachineHoursCurrentMeter', 0)]:
        if key in row and row[key] == value:
            row.pop(key)
    if 'YearMade' in row and not (1950 < row['YearMade'] < 2012):
        row.pop('YearMade')
    for key in ['Engine_Horsepower', 'PrimaryUpper', 'fiManufacturerDesc']:
        if key in row:
            row.pop(key)
    return row


for tag, function in datasets.items():

    dataset = function()
    log = util.Logger(len(dataset), 20000, tag='init')

    # convert to dictionaries, clean and add to mongo
    for _, row in dataset.iterrows():
        log.step()
        row = dict(row)

        row = remove_bad_values(row)

        # update from machine index file
        m_row = machines[row['MachineID']]
        for key in m_row:
            if key not in row:
                row[key] = machines[row['MachineID']][key]
Example #29
    def __init__(self, config, logger=None):
        self.config = config
        if logger:
            self.logger = logger
        else:
            self.logger = util.Logger(config)
        self.dict_names = [
            "label", "token", "char", "custom_feature", "token_ngram",
            "char_ngram", "char_in_token"
        ]
        self.dict_files = []
        for dict_name in self.dict_names:
            self.dict_files.append(self.config.data.dict_dir + "/" +
                                   dict_name + ".dict")
        self.label_dict_file = self.dict_files[0]

        # Should keep all labels
        self.min_count = [
            0, self.config.feature_common.min_token_count,
            self.config.feature_common.min_char_count,
            self.config.var_len_feature.min_custom_feature_count,
            self.config.var_len_feature.min_token_ngram_count,
            self.config.var_len_feature.min_char_ngram_count,
            self.config.feature_common.min_char_count_in_token
        ]
        # Should keep all labels
        self.max_dict_size = \
            [1000 * 1000, self.config.feature_common.max_token_dict_size,
             self.config.feature_common.max_char_dict_size,
             self.config.var_len_feature.max_custom_feature_dict_size,
             self.config.var_len_feature.max_token_ngram_dict_size,
             self.config.var_len_feature.max_char_ngram_dict_size,
             self.config.feature_common.max_char_in_token_dict_size]
        # Label and custom feature have no max_sequence_length.
        self.max_sequence_length = \
            [0, self.config.fixed_len_feature.max_token_sequence_length,
             self.config.fixed_len_feature.max_char_sequence_length, 0]
        # Label and custom feature have no ngram.
        self.ngram_list = [
            0, self.config.var_len_feature.token_ngram,
            self.config.var_len_feature.char_ngram, 0
        ]
        self.label_map = dict()
        self.token_map = dict()
        self.char_map = dict()
        self.custom_feature_map = dict()
        self.token_gram_map = dict()
        self.char_gram_map = dict()
        self.char_in_token_map = dict()
        self.dict_list = [
            self.label_map, self.token_map, self.char_map,
            self.custom_feature_map, self.token_gram_map, self.char_gram_map,
            self.char_in_token_map
        ]

        self.id_to_label_map = dict()
        self.id_to_token_map = dict()
        self.id_to_char_map = dict()
        self.id_to_custom_feature_map = dict()
        self.id_to_token_gram_map = dict()
        self.id_to_char_gram_map = dict()
        self.id_to_char_in_token_map = dict()
        self.id_to_vocab_dict_list = [
            self.id_to_label_map, self.id_to_token_map, self.id_to_char_map,
            self.id_to_custom_feature_map, self.id_to_token_gram_map,
            self.id_to_char_gram_map, self.id_to_char_in_token_map
        ]

        self.train_text_file, self.validate_text_file, self.test_text_file = \
            self.config.data.train_text_file, \
            self.config.data.validate_text_file, \
            self.config.data.test_text_file

        self.tfrecord_files = [
            self.config.data.tfrecord_dir + "/" +
            os.path.basename(self.train_text_file) + ".tfrecord",
            self.config.data.tfrecord_dir + "/" +
            os.path.basename(self.validate_text_file) + ".tfrecord",
            self.config.data.tfrecord_dir + "/" +
            os.path.basename(self.test_text_file) + ".tfrecord"
        ]
        self.train_file, self.validate_file, self.test_file = \
            self.tfrecord_files

        self.feature_files = [
            self.config.data.tfrecord_dir + "/" +
            os.path.basename(self.train_text_file) + ".feature",
            self.config.data.tfrecord_dir + "/" +
            os.path.basename(self.validate_text_file) + ".feature",
            self.config.data.tfrecord_dir + "/" +
            os.path.basename(self.test_text_file) + ".feature"
        ]
        (self.train_feature_file, self.validate_feature_file,
         self.test_feature_file) = self.feature_files

        self.pretrained_embedding_files = [
            "",
            config.feature_common.token_pretrained_embedding_file,
            config.feature_common.char_pretrained_embedding_file,
            config.var_len_feature.custom_feature_pretrained_embedding_file,
        ]

        self.int_list_column = [
            "fixed_len_token", "var_len_token", "char_in_token",
            "char_in_token_real_len", "fixed_len_char", "var_len_char",
            "var_len_token_ngram", "var_len_char_ngram",
            "var_len_custom_feature"
        ]
        self.int_column = ["token_fixed_real_len", "char_fixed_real_len"]
        self.float_column = [
            "token_var_real_len", "char_var_real_len",
            "token_ngram_var_real_len", "char_ngram_var_real_len",
            "custom_feature_var_real_len"
        ]
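Example #29 keeps seven parallel lists (dict_names, min_count, max_dict_size, and so on) whose entries must stay index-aligned by hand. A sketch of one way to make that coupling explicit, with illustrative values rather than the project's real config:

from collections import namedtuple

FeatureSpec = namedtuple('FeatureSpec', ['min_count', 'max_dict_size'])

# Keyed by feature name, so alignment is a lookup rather than a list position.
feature_specs = {
    'label': FeatureSpec(min_count=0, max_dict_size=1000 * 1000),  # keep all labels
    'token': FeatureSpec(min_count=2, max_dict_size=100 * 1000),   # illustrative
    'char': FeatureSpec(min_count=1, max_dict_size=10 * 1000),     # illustrative
}

assert feature_specs['label'].min_count == 0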
Example #30
# -*- coding: utf-8 -*-
import threading, datetime, sys
import Model_Controller
import AI_Agent, util
print "输出重定向"
sys.stdout = util.Logger()


class GameStart():
    def __init__(self):
        pass
        # self.gc = Model_Controller.GameController()

    def start(self):
        pass

    def func_task(self):
        pass
        # print("执行任务中...")

    def func_timer(self):
        pass


if __name__ == "__main__":

    playerFlag = True
    stageFlag = True
    numOfPlayers = 1
    numOfStage = 0