Example #1
def get_model(args):
    model = Classifier(arch=args.arch,
                       ckpt=args.checkpoint,
                       pool_type=args.pool_type,
                       norm_type=args.norm_type)

    if args.pretrained:

        if args.pretrain_info is None or len(args.pretrain_info) != 2:
            raise ValueError("Invalid pretraining info.")
        save_folder, save_id = args.pretrain_info
        if isinstance(save_id, int):
            save_path = os.path.join("./saves", save_folder,
                                     f"save_{save_id:03d}.pth")
        else:
            save_path = os.path.join("./saves", save_folder,
                                     f"save_{save_id}.pth")

        state_dict = torch.load(save_path)
        if "module." in list(state_dict.keys())[0]:
            temp_state = OrderedDict()
            for k, v in state_dict.items():
                temp_state[k.split("module.")[-1]] = v
            state_dict = temp_state
        model.load_state_dict(state_dict)

    return model
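
A minimal usage sketch for get_model; the field values below are hypothetical and assume the same module context (Classifier, torch, os) as the function above, plus an existing save file.
from argparse import Namespace

args = Namespace(arch="resnet18", checkpoint=None, pool_type="avg", norm_type="bn",
                 pretrained=True, pretrain_info=["run_01", 7])   # expects ./saves/run_01/save_007.pth
model = get_model(args)
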
def reset_models():
    feature_mapper = FeatureMapper()
    rpn = Rpn()
    classifier = Classifier()
    regr = Regr()

    random_image = tf.convert_to_tensor(np.random.random(
        (1, image_size, image_size, 3)),
                                        dtype=np.float32)
    random_features = tf.convert_to_tensor(np.random.random(
        (1, feature_size, feature_size, 12)),
                                           dtype=np.float32)
    random_feature_areas = [
        tf.convert_to_tensor(np.random.random(
            (1, anchor_size, anchor_size, 12)),
                             dtype=np.float32)
    ]

    # run each model once on dummy inputs so its layers are built before saving weights
    _ = feature_mapper(random_image)
    _ = rpn(random_features)
    _ = classifier(random_feature_areas)
    _ = regr(random_feature_areas)

    feature_mapper.save_weights("./weights/feature_mapper")
    rpn.save_weights("./weights/rpn")
    classifier.save_weights("./weights/classifier")
    regr.save_weights("./weights/regr")
 def test_classify(self):
     model_name = '../models/glove_wiki/glove_model_40.pth'
     output_file = 'test/test_data/glove_classify_model.pkl'
     compare_output_file = 'glove_classify_model.pkl'
     classifier = Classifier(model_name=model_name)
     classifier.classify()
     assert filecmp.cmp(output_file, compare_output_file)
Example #4
    def init_fn(self, shared_model=None, **kwargs):
        if self.options.model.name == "pixel2mesh":
            # Visualization renderer
            self.renderer = MeshRenderer(self.options.dataset.camera_f, self.options.dataset.camera_c,
                                         self.options.dataset.mesh_pos)
            # create ellipsoid
            self.ellipsoid = Ellipsoid(self.options.dataset.mesh_pos)
        else:
            self.renderer = None

        if shared_model is not None:
            self.model = shared_model
        else:
            if self.options.model.name == "pixel2mesh":
                # create model
                self.model = P2MModel(self.options.model, self.ellipsoid,
                                      self.options.dataset.camera_f, self.options.dataset.camera_c,
                                      self.options.dataset.mesh_pos)
            elif self.options.model.name == "classifier":
                self.model = Classifier(self.options.model, self.options.dataset.num_classes)
            else:
                raise NotImplementedError("Your model is not found")
            self.model = torch.nn.DataParallel(self.model, device_ids=self.gpus).cuda()

        # Setup a joint optimizer for the 2 models
        if self.options.optim.name == "adam":
            self.optimizer = torch.optim.Adam(
                params=list(self.model.parameters()),
                lr=self.options.optim.lr,
                betas=(self.options.optim.adam_beta1, 0.999),
                weight_decay=self.options.optim.wd
            )
        elif self.options.optim.name == "sgd":
            self.optimizer = torch.optim.SGD(
                params=list(self.model.parameters()),
                lr=self.options.optim.lr,
                momentum=self.options.optim.sgd_momentum,
                weight_decay=self.options.optim.wd
            )
        else:
            raise NotImplementedError("Your optimizer is not found")
        self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            self.optimizer, self.options.optim.lr_step, self.options.optim.lr_factor
        )

        # Create loss functions
        if self.options.model.name == "pixel2mesh":
            self.criterion = P2MLoss(self.options.loss, self.ellipsoid).cuda()
        elif self.options.model.name == "classifier":
            self.criterion = CrossEntropyLoss()
        else:
            raise NotImplementedError("Your loss is not found")

        # Create AverageMeters for losses
        self.losses = AverageMeter()

        # Evaluators
        self.evaluators = [Evaluator(self.options, self.logger, self.summary_writer, shared_model=self.model)]
Example #5
def define_C(gpu_ids=[]):
    use_gpu = len(gpu_ids) > 0

    if use_gpu:
        assert (torch.cuda.is_available())
    netC = Classifier()
    if use_gpu:
        netC.cuda(device=gpu_ids[0])
    #netC.apply(weights_init)
    return netC
Example #6
def main():
    config = get_config()

    loader = Dataload()
    train, label_train, test, label_test = loader.class_train_test(path_feat_data=config.feat_path,
                                                                   batch_size=config.batch_size,
                                                                   shuffle=True)

    model = Classifier(batch_size=config.batch_size)
    trainer = TrainerClass(model, model_dir=config.model_dir, log_dir=config.log_dir)
    trainer.train(list(zip(train, label_train)), lr=config.lr, n_epoch=config.epochs)
    model.eval()
    trainer.test((test, label_test))
Example #7
def get_modules(opt):
    modules = {}
    disc = Discriminator()
    gen = Generator()
    clf = Classifier()
    if opt.cuda:
        disc = disc.cuda()
        gen = gen.cuda()
        clf = clf.cuda()

    modules['Discriminator'] = disc
    modules['Generator'] = gen
    modules['Classifier'] = clf
    return modules
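
A hedged usage sketch for the factory above; `opt` only needs a boolean `cuda` attribute here, and the model classes are assumed to come from the surrounding module.
from argparse import Namespace
import torch

modules = get_modules(Namespace(cuda=torch.cuda.is_available()))
clf = modules['Classifier']
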
Example #8
def define_C(gpu_ids=[]):
    """
    gender preserving classifier
    :param gpu_ids:
    :return:
    """
    use_gpu = len(gpu_ids) > 0

    if use_gpu:
        assert (torch.cuda.is_available())
    netC = Classifier()
    if use_gpu:
        netC.cuda(device=gpu_ids[0])
    # netC.apply(weights_init)
    return netC
def build_model_and_trainer(config, data_loader):
    if config.model.type == 'classifier':
        model_builder = Classifier(config)
        model, parallel_model = WithLoadWeights(model_builder, model_name='classifier') \
            .build_model(model_name='classifier')
        trainer = ClassifierTrainer(model, parallel_model, data_loader, config)

        return model, trainer

    elif config.model.type == 'dcgan':
        g_model_builder = Generator(config)
        d_model_builder = Discriminator(config)
        # c_model_builder = Classifier(config)

        g = g_model_builder.define_model('generator')
        d, parallel_d = d_model_builder.build_model('discriminator')
        # c, _ = c_model_builder.build_model('classifier')

        # Load weights to classifier
        # checkpoint_path = './experiments/classifier_mnist/checkpoints/0050-classifier.hdf5'
        # if os.path.exists(checkpoint_path):
        #     c.load_weights(checkpoint_path)

        combined_model_builder = GANCombined(config)

        combined, parallel_combined = WithLoadWeights(combined_model_builder, model_name='combined') \
            .build_model(g=g, d=d, model_name='combined')
        # .build_model(g=g, d=d, c=c, model_name='combined')
        # trainer = GANTrainer(data_loader, config, g, d, parallel_d, c, combined, parallel_combined)
        trainer = GANTrainer(data_loader, config, g, d, parallel_d, combined,
                             parallel_combined)

        return combined, trainer
Example #10
def train():
    model = Classifier()
    saver = tf.train.Saver()
    
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_step = optimizer.minimize(model.loss)
    
    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(join(log_dir, 'train'), sess.graph)
        test_writer = tf.summary.FileWriter(join(log_dir, 'test'), sess.graph)
        sess.run(tf.global_variables_initializer())
        
        for i in trange(n_iters):
            x, y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_step, model.summary], feed_dict={
                model.x: x,
                model.y: y,
                model.keep_prob: 0.5,
            })
            train_writer.add_summary(summary, i)
            
            if i % 500 == 0:
                summary = sess.run(model.summary, feed_dict={
                    model.x: mnist.test.images[:6000],
                    model.y: mnist.test.labels[:6000],
                    model.keep_prob: 1.0,
                })
                test_writer.add_summary(summary, i)
        print('Training complete, model saved at:', saver.save(sess, model_path))
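
A hedged follow-up sketch: restoring the checkpoint saved above in a fresh TF1 session, reusing the module-level model_path and mnist objects assumed by the script.
model = Classifier()
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, model_path)
    test_loss = sess.run(model.loss, feed_dict={
        model.x: mnist.test.images[:1000],
        model.y: mnist.test.labels[:1000],
        model.keep_prob: 1.0,
    })
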
def transfer_classifier_params(args):
    pytorch_model = TorchClassifier()
    with open(args.torch_classifier_model_path, 'rb') as f:
        weights = dill.load(f)
    state_dict = {k: torch.FloatTensor(v) for k, v in weights[0].items()}
    pytorch_model.load_state_dict(state_dict)
    for k in state_dict.keys():
        print('key: ', k)

    chainer_model = Classifier()

    copy_conv_weight(pytorch_model.LeNet[0], chainer_model.conv1)
    copy_conv_weight(pytorch_model.LeNet[2], chainer_model.conv2)
    copy_batch_norm_weight(pytorch_model.LeNet[3], chainer_model.batch_norm1)
    copy_conv_weight(pytorch_model.LeNet[5], chainer_model.conv3)
    copy_batch_norm_weight(pytorch_model.LeNet[6], chainer_model.batch_norm2)
    copy_conv_weight(pytorch_model.LeNet[8], chainer_model.conv4)
    copy_batch_norm_weight(pytorch_model.LeNet[9], chainer_model.batch_norm3)
    copy_conv_weight(pytorch_model.LeNet[11], chainer_model.conv5)

    if args.sample_image:
        subtractor = Subtractor()
        load_model(args.chainer_subtractor_model_path, subtractor)
        image1 = convert_to_grayscale(subtractor, args.sample_image).data
        image2 = np.zeros(shape=image1.shape, dtype=np.float32)
        print('image1 shape: ', image1.shape)
        print('image2 shape: ', image2.shape)

        classify_image_with_pytorch_model(pytorch_model, torch.Tensor(image1), torch.Tensor(image2))
        classify_image_with_chainer_model(chainer_model, chainer.Variable(image1), chainer.Variable(image2))

    save_model(args.chainer_classifier_save_path, chainer_model)
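
The copy helpers above are defined elsewhere in the source project; purely as an illustration, a PyTorch-to-Chainer conv copy might look roughly like this, assuming the Chainer side is a plain Convolution2D link.
def copy_conv_weight(torch_conv, chainer_conv):
    # both frameworks store conv kernels as (out_ch, in_ch, kh, kw), so a direct copy works
    chainer_conv.W.data[...] = torch_conv.weight.detach().cpu().numpy()
    if torch_conv.bias is not None:
        chainer_conv.b.data[...] = torch_conv.bias.detach().cpu().numpy()
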
Example #12
 def test_classify_predict(self):
     self.test_data_loader = DataLoader()
     self.test_japanese_wiki_data = 'test/test_data/jawiki_test.txt'
     test_word2index, test_index2word, test_window_data, \
     test_X_ik, test_weightinhg_dict = self.test_data_loader.load_data(file_name=self.test_japanese_wiki_data)  # noqa
     model_name = '../models/glove_wiki/glove_model_40.pth'
     output_file = 'test/test_data/glove_classify_model.pkl'
     classifier = Classifier(model_name=model_name)
     print(test_word2index)
     classes = classifier.classify_predict(word='の',
                                           classify_model_name=output_file,
                                           word2index=test_word2index)
     assert 2 == classes
     classes = classifier.classify_predict(word='どうよ?',
                                           classify_model_name=output_file,
                                           word2index=test_word2index)
     assert 9999 == classes
Example #13
def define_classifier(args, config, device):
    # -import required model
    from models.classifier import Classifier
    # -create model
    if (hasattr(args, "depth") and args.depth > 0):
        model = Classifier(
            image_size=config['size'],
            image_channels=config['channels'],
            classes=config['classes'],
            # -conv-layers
            conv_type=args.conv_type,
            depth=args.depth,
            start_channels=args.channels,
            reducing_layers=args.rl,
            num_blocks=args.n_blocks,
            conv_bn=True if args.conv_bn == "yes" else False,
            conv_nl=args.conv_nl,
            global_pooling=checkattr(args, 'gp'),
            # -fc-layers
            fc_layers=args.fc_lay,
            fc_units=args.fc_units,
            h_dim=args.h_dim,
            fc_drop=args.fc_drop,
            fc_bn=True if args.fc_bn == "yes" else False,
            fc_nl=args.fc_nl,
            excit_buffer=True,
            # -training related parameters
            AGEM=utils.checkattr(args, 'agem')).to(device)
    else:
        model = Classifier(
            image_size=config['size'],
            image_channels=config['channels'],
            classes=config['classes'],
            # -fc-layers
            fc_layers=args.fc_lay,
            fc_units=args.fc_units,
            h_dim=args.h_dim,
            fc_drop=args.fc_drop,
            fc_bn=True if args.fc_bn == "yes" else False,
            fc_nl=args.fc_nl,
            excit_buffer=True,
            # -training related parameters
            AGEM=utils.checkattr(args, 'agem')).to(device)
    # -return model
    return model
Example #14
 def __get_model(self):
     if self.config["use_cae"] and self.config["use_cnn"]:
         return Classifier(self.config)
     elif self.config["use_cnn"]:
         return CNN(self.config)
     elif self.config["use_cae"]:
         return ConvolutionalAutoEncoder(self.config)
     else:
         assert False, "At least one model should be specified."
def inception_score(path_to_generated_imgs_dir,
                    path_to_generated_imgs_dir_cache, downsample_scale,
                    path_to_classifier, classifier_dimensionality,
                    cuda_enabled, batch_size, splits):
    # Set up data
    generated_brainpedia = Brainpedia(
        data_dirs=[path_to_generated_imgs_dir],
        cache_dir=path_to_generated_imgs_dir_cache,
        scale=downsample_scale,
        multi_tag_label_encoding=MULTI_TAG_LABEL_ENCODING)
    generated_brain_data_shape, generated_brain_data_tag_shape = generated_brainpedia.sample_shapes(
    )
    all_generated_brain_data, all_generated_brain_data_tags = generated_brainpedia.all_data(
    )
    all_generated_brain_data = Variable(torch.Tensor(all_generated_brain_data))

    if cuda_enabled:
        all_generated_brain_data = all_generated_brain_data.cuda()

    # Load classifier model
    classifier = Classifier(dimensionality=classifier_dimensionality,
                            num_classes=generated_brain_data_tag_shape[0],
                            cudaEnabled=cuda_enabled)
    classifier.load_state_dict(torch.load(path_to_classifier))

    # Compute predictions
    predictions = classifier.forward(
        all_generated_brain_data).data.cpu().numpy()

    # Now compute the mean kl-div
    N = len(all_generated_brain_data)
    split_scores = []

    for k in range(splits):
        part = predictions[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
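
The split loop above is the usual inception-score estimate, exp(mean over samples of KL(p(y|x) || p(y))); a compact equivalent on a dummy prediction matrix, for illustration only:
import numpy as np
from scipy.stats import entropy

preds = np.random.dirichlet(np.ones(10), size=100)  # hypothetical per-sample class probabilities
py = preds.mean(axis=0)                              # marginal class distribution p(y)
score = np.exp(np.mean([entropy(p, py) for p in preds]))
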
Example #16
File: tracker.py Project: MaybeS/MOT
    def __init__(self,
                 min_score: float = .2, min_dist: float = .64, max_lost: int = 120,
                 use_tracking: bool = True, use_refind: bool = True):

        self.min_score = min_score
        self.min_dist = min_dist
        self.max_lost = max_lost

        self.use_tracking = use_tracking
        self.use_refind = use_refind

        self.tracked = []
        self.lost = []
        self.removed = []

        self.motion = KalmanFilter()

        self.identifier = Identifier().load()
        self.classifier = Classifier().load()

        self.frame = 0
Example #17
	def __init__(self, args, nclass):
		super(Model, self).__init__()

		self.nclass = nclass
		if args.dataset == 'embedding':
			self.backbone = Classifier(self.nclass)
		elif 'xor_resnet' in args.arch:
			self.backbone = XORS[args.arch](pretrained=args.pretrained)
			final_feature = self.backbone.fc.in_features
			self.backbone.fc = nn.Linear(final_feature, self.nclass)
		else:
			self.backbone = models.__dict__[args.arch](pretrained=args.pretrained)
			final_feature = self.backbone.fc.in_features
			self.backbone.fc = nn.Linear(final_feature, self.nclass)
Example #18
    def init_fn(self, shared_model=None, **kwargs):
        if self.options.model.name == "pixel2mesh":
            # Renderer for visualization
            self.renderer = MeshRenderer(self.options.dataset.camera_f,
                                         self.options.dataset.camera_c,
                                         self.options.dataset.mesh_pos)
            # Initialize distance module
            self.chamfer = ChamferDist()
            # create ellipsoid
            self.ellipsoid = Ellipsoid(self.options.dataset.mesh_pos)
            # use weighted mean evaluation metrics or not
            self.weighted_mean = self.options.test.weighted_mean
        else:
            self.renderer = None
        self.num_classes = self.options.dataset.num_classes

        if shared_model is not None:
            self.model = shared_model
        else:
            if self.options.model.name == "pixel2mesh":
                # create model
                self.model = P2MModel(self.options.model, self.ellipsoid,
                                      self.options.dataset.camera_f,
                                      self.options.dataset.camera_c,
                                      self.options.dataset.mesh_pos)
            elif self.options.model.name == "classifier":
                self.model = Classifier(self.options.model,
                                        self.options.dataset.num_classes)
            else:
                raise NotImplementedError("Your model is not found")
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.gpus).cuda()

        # Evaluate step count, useful in summary
        self.evaluate_step_count = 0
        self.total_step_count = 0
Example #19
def train():
    feature_mapper = FeatureMapper()
    rpn = Rpn()
    roi_pooling = RoiPooling()
    classifier = Classifier()
    regr = Regr()

    feature_mapper.load_weights("./weights/feature_mapper")
    rpn.load_weights("./weights/rpn")
    classifier.load_weights("./weights/classifier")
    regr.load_weights("./weights/regr")

    opt = Adam(learning_rate=5e-5)
    with open("../data/data_detect_local_evaluate_10000.json") as json_file:
        data = json.load(json_file)
    data_index = 0
    while str(data_index) in data:
        raw_data = data[str(data_index)]
        target, bounding_box_target = get_localization_data(raw_data)
        img = get_img("../pictures/pictures_detect_local_evaluate_10000/{}.png".format(data_index))

        def get_loss():
            features = feature_mapper(img)
            rpn_map = rpn(features)

            boxes, probs = get_boxes(rpn_map)
            feature_areas = roi_pooling(features, boxes)

            classification_logits = classifier(feature_areas)
            regression_values = regr(feature_areas)

            labels_boxes = get_labels_boxes(boxes, target)

            localization_loss = get_localization_loss(rpn_map, target)
            regression_loss = get_regression_loss(regression_values, boxes, bounding_box_target, probs)
            classification_loss = get_classification_loss(classification_logits, labels_boxes, probs)

            no_regr_boxes_precision = get_boxes_precision(boxes, np.zeros(regression_values.shape), target)
            final_boxes_precision = get_boxes_precision(boxes, regression_values.numpy(), target)
            save_data(data_index, raw_data, boxes.tolist(), [a.numpy().tolist() for a in classification_logits], labels_boxes, no_regr_boxes_precision, final_boxes_precision, probs.tolist())

            return localization_loss + classification_loss + regression_loss

        opt.minimize(
            get_loss,
            [feature_mapper.trainable_weights, rpn.trainable_weights, classifier.trainable_weights, regr.trainable_weights],
        )

        data_index += 1
        if (data_index % 100 == 99):
            feature_mapper.save_weights("./weights/feature_mapper")
            rpn.save_weights("./weights/rpn")
            classifier.save_weights("./weights/classifier")
            regr.save_weights("./weights/regr")
Example #20
    def __init__(self, dim, ndf, nclasses, ngroups, nchannels):
        super(EncLSTM, self).__init__()

        self.dim = dim
        self.ndf = ndf
        self.nclasses = nclasses
        self.ngroups = ngroups
        self.nchannels = nchannels

        self.encoder = Encoder(self.ndf, self.ngroups, self.nchannels)
        self.convlstm = ConvLSTM(input_channels=self.ndf * 8,
                                 hidden_channels=[self.ndf * 8],
                                 kernel_size=3,
                                 step=5,
                                 effective_step=[4])
        self.classifier = Classifier(self.ndf * 8, self.nclasses, self.ngroups)
        self.soft_attention = SoftAttention(self.ndf * 16)
def model_choice(model_name, out_channels=2):
    model_name = model_name.lower()

    model_dict = {'unet': UNet(out_channels),
                  'deeplabv3+': DeepLabV3Plus(out_channels),
                  'deeplabv3plus': DeepLabV3Plus(out_channels),
                  'fcn': FCN4x(out_channels),
                  'deeperlabc': DeeperLabC(out_channels),
                  'deeperlab': DeeperLabC(out_channels)
                  }

    try:
        model = model_dict[model_name]
    except KeyError:
        model = None
        print('no such model, please check "model_name" in config.py')
        exit(0)

    classifier = Classifier(out_channels)
    return model, classifier
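
A brief usage sketch for the helper above:
model, classifier = model_choice('deeplabv3+', out_channels=2)
# an unknown name prints a hint and exits, so no None check is needed afterwards
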
Example #22
def main():
    config = get_config()

    loader = Dataload()
    train, label_train, test, label_test = loader.class_train_test(path_feat_data=config.feat_path,
                                                                   batch_size=config.batch_size,
                                                                   train_ratio=1,
                                                                   shuffle=True)


    ae = AE(13*3)
    ae.load_state_dict(torch.load(config.ae_model_path))
    ae.eval()

    classifier = Classifier(13*3, num_classes=109, batch_size=50)
    classifier.load_state_dict(torch.load(config.class_model_path))
    classifier.eval()

    trainer = TrainerClass(classifier)
    unnoise_data = ae(train)
    trainer.test((unnoise_data, label_train))
Example #23
import numpy as np
import tensorflow as tf

from boxes import get_boxes, get_final_box
from constants import real_image_height, real_image_width, feature_size
from models.classifier import Classifier
from models.feature_mapper import FeatureMapper
from models.regr import Regr
from models.roi_pooling import RoiPooling
from models.rpn import Rpn
from models.segmentation import Segmentation

feature_mapper = FeatureMapper()
rpn = Rpn()
roi_pooling = RoiPooling()
classifier = Classifier()
regr = Regr()
segmentation = Segmentation()

feature_mapper.load_weights("./weights/feature_mapper")
rpn.load_weights("./weights/rpn")
classifier.load_weights("./weights/classifier")
regr.load_weights("./weights/regr")
segmentation.load_weights("./weights/segmentation")


def get_prediction(img):
    features = feature_mapper(img)
    rpn_map = rpn(features)

    boxes, probs = get_boxes(rpn_map)
Example #24
def main(input_args=None):
    # Parse the arguments.
    args = parse_arguments(input_args)
    device = args.gpu
    method = args.method

    if args.data_name == 'suzuki':
        datafile = 'data/suzuki_type_test_v2.csv'
        class_num = 119
        class_dict = {'M': 28, 'L': 23, 'B': 35, 'S': 10, 'A': 17}
        dataset_filename = 'test_data.npz'
        labels = ['Yield', 'M', 'L', 'B', 'S', 'A', 'id']
    elif args.data_name == 'CN':
        datafile = 'data/CN_coupling_test.csv'
        class_num = 206
        class_dict = {'M': 44, 'L': 47, 'B': 13, 'S': 22, 'A': 74}
        dataset_filename = 'test_CN_data.npz'
        labels = ['Yield', 'M', 'L', 'B', 'S', 'A', 'id']
    elif args.data_name == 'Negishi':
        datafile = 'data/Negishi_test.csv'
        class_num = 106
        class_dict = {'M': 32, 'L': 20, 'T': 8, 'S': 10, 'A': 30}
        dataset_filename = 'test_Negishi_data.npz'
        labels = ['Yield', 'M', 'L', 'T', 'S', 'A', 'id']
    elif args.data_name == 'PKR':
        datafile = 'data/PKR_test.csv'
        class_num = 83
        class_dict = {
            'M': 18,
            'L': 6,
            'T': 7,
            'S': 15,
            'A': 11,
            'G': 1,
            'O': 13,
            'P': 4,
            'other': 1
        }
        dataset_filename = 'test_PKR_data.npz'
        labels = [
            'Yield', 'M', 'L', 'T', 'S', 'A', 'G', 'O', 'P', 'other', 'id'
        ]
    else:
        raise ValueError('Unexpected dataset name')

    cache_dir = os.path.join('input', '{}_all'.format(method))

    # Dataset preparation.
    def postprocess_label(label_list):
        return numpy.asarray(label_list, dtype=numpy.float32)

    print('Preprocessing dataset...')

    # Load the cached dataset.
    dataset_cache_path = os.path.join(cache_dir, dataset_filename)

    dataset = None
    if os.path.exists(dataset_cache_path):
        print('Loading cached dataset from {}.'.format(dataset_cache_path))
        dataset = NumpyTupleDataset.load(dataset_cache_path)
    if dataset is None:
        if args.method == 'mpnn':
            preprocessor = preprocess_method_dict['ggnn']()
        else:
            preprocessor = preprocess_method_dict[args.method]()
        parser = CSVFileParser(
            preprocessor,
            postprocess_label=postprocess_label,
            labels=labels,
            smiles_col=['Reactant1', 'Reactant2', 'Product'],
            label_dicts=class_dict)
        dataset = parser.parse(datafile)['dataset']

        # Cache the loaded dataset.
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        NumpyTupleDataset.save(dataset_cache_path, dataset)

    labels = dataset.get_datasets()[-2]
    ids = dataset.get_datasets()[-1][:, 1].reshape(-1, 1)
    yields = dataset.get_datasets()[-1][:, 0].reshape(-1, 1).astype(
        'float32')  # [:,0] added
    dataset = NumpyTupleDataset(*(dataset.get_datasets()[:-2] + (
        yields,
        labels,
    )))

    # Load the standard scaler parameters, if necessary.
    scaler = None
    test = dataset

    print('Predicting...')
    # Set up the regressor.
    model_path = os.path.join(args.in_dir, args.model_filename)

    if os.path.exists(model_path):
        classifier = Classifier.load_pickle(model_path, device=args.gpu)
    else:
        predictor = set_up_predictor(args.method, args.unit_num,
                                     args.conv_layers, class_num)
        classifier = Classifier(predictor,
                                lossfun=F.sigmoid_cross_entropy,
                                metrics_fun=F.binary_accuracy,
                                device=args.gpu)

    if args.load_modelname:
        serializers.load_npz(args.load_modelname, classifier)
    scaled_predictor = ScaledGraphConvPredictor(
        graph_conv=classifier.predictor.graph_conv,
        mlp=classifier.predictor.mlp)
    classifier.predictor = scaled_predictor

    # This callback function extracts only the inputs and discards the labels.
    def extract_inputs(batch, device=None):
        return concat_mols(batch, device=device)[:-1]

    # Predict the output labels.
    # Prediction function rewrite!!!
    y_pred = classifier.predict(test, converter=extract_inputs)
    y_pred_max = numpy.argmax(y_pred, axis=1)
    y_pred_max = y_pred_max.reshape(-1, 1)
    # y_pred_idx = y_pred.argsort(axis=1) # ascending

    # Extract the ground-truth labels.
    t = concat_mols(test, device=-1)[-1]  # device 11/14 memory issue
    original_t = cuda.to_cpu(t)
    t_idx = original_t.squeeze(1)
    t_idx = t_idx.argsort(axis=1)
    # gt_indx = numpy.where(original_t == 1)

    # Construct dataframe.
    df_dict = {}
    for i, l in enumerate(labels[:1]):
        df_dict.update({
            'y_pred_{}'.format(l): y_pred_max[:, -1].tolist(),  # [:,-1]
            't_{}'.format(l): t_idx[:, -1].tolist(),
        })
    df = pandas.DataFrame(df_dict)

    # Show a prediction/ground truth table with 5 random examples.
    print(df.sample(5))

    n_eval = 10

    for target_label in range(y_pred_max.shape[1]):
        label_name = labels[:1][0][target_label]
        print('label_name = {}, y_pred = {}, t = {}'.format(
            label_name, y_pred_max[:n_eval, target_label], t_idx[:n_eval, -1]))

    # Perform the prediction.
    print('Evaluating...')
    test_iterator = SerialIterator(test, 16, repeat=False, shuffle=False)
    eval_result = Evaluator(test_iterator,
                            classifier,
                            converter=concat_mols,
                            device=args.gpu)()
    print('Evaluation result: ', eval_result)

    with open(os.path.join(args.in_dir, 'eval_result.json'), 'w') as f:
        json.dump(eval_result, f)

    res_dic = {}
    for i in range(len(y_pred)):
        res_dic[i] = str(ids[i])
    json.dump(res_dic, open(os.path.join(args.in_dir, "test_ids.json"), "w"))

    pickle.dump(y_pred, open(os.path.join(args.in_dir, "pred.pkl"), "wb"))
    pickle.dump(original_t, open(os.path.join(args.in_dir, "gt.pkl"), "wb"))
Example #25
def cut():
    text = request.json
    return jsonify(Classifier().cut_from_text(text['data']))
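
A hedged client-side sketch for the route above; the URL path and port are assumptions, since the route decorator is not shown in this excerpt.
import requests

resp = requests.post("http://localhost:5000/cut", json={"data": "text to segment"})
print(resp.json())
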
Example #26


# split cv and training data
train, cv = data[:1500], data[1500:]
train_total_sold, cv_total_sold = total_sold[:1500], total_sold[1500:]
train_week26_stores, cv_week26_stores = week26_stores[:1500], week26_stores[1500:]
train_us26 = us26[:1500]
#print us26[0:10]
#print data[0]
#print len(train[0])
#print len(train[10])
#print len(train[100])
#print len(us26)
#print total_sold[0]



# classify
start = time.time()
preds = Classifier.preds(train, train_us26, cv, cv_week26_stores)
preds = ProductPostprocess.nonnegatives(preds)
print "duration: ", time.time() - start
print preds[:10]
print cv_total_sold[:10]



# score
print "score: ", score.rmsle(preds, cv_total_sold)
Example #27
import torch

from models.classifier import Classifier
from options import options


options.model.backbone = "vgg16"
model = Classifier(options.model, 1000)
state_dict = torch.load("checkpoints/debug/migration/400400_000080.pt")
model.load_state_dict(state_dict["model"])
torch.save(model.nn_encoder.state_dict(), "checkpoints/debug/migration/vgg16-p2m.pth")
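
A hedged follow-up, reusing the imports above: the exported encoder weights could later be loaded back into another instance built with the same options.
model2 = Classifier(options.model, 1000)
encoder_state = torch.load("checkpoints/debug/migration/vgg16-p2m.pth")
model2.nn_encoder.load_state_dict(encoder_state)
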
Example #28
train_data, us26, _ = Product.data_for_units_per_store()
test_data, _, week26_stores, product_ids = Product.data_for_units_per_store(data_file="/home/ubuntu/product_launch/data/test.csv", ids=True)

matrix = ProductPreprocess.to_matrix(train_data)
matrix = ProductPreprocess.scale(matrix)
matrix = ProductPreprocess.polynomial(matrix, 2)
data = matrix.tolist()



# split cv and training data
train_total_sold, cv_total_sold = total_sold[:1500], total_sold[1500:]
train_week26_stores, cv_week26_stores = week26_stores[:1500], week26_stores[1500:]
train_us26 = us26[:1500]
print us26[0:10]
print data[0]
#print len(train[0])
#print len(train[10])
#print len(train[100])
#print len(us26)
#print total_sold[0]



# classify
start = time.time()
preds = Classifier.preds(train_data, us26, test_data, week26_stores)
preds = ProductPostprocess.nonnegatives(preds)
ProductPostprocess.submit(preds, product_ids)
print "duration: ", time.time() - start
Example #29
File: tracker.py Project: MaybeS/MOT
class Tracker(object):
    def __init__(self,
                 min_score: float = .2, min_dist: float = .64, max_lost: int = 120,
                 use_tracking: bool = True, use_refind: bool = True):

        self.min_score = min_score
        self.min_dist = min_dist
        self.max_lost = max_lost

        self.use_tracking = use_tracking
        self.use_refind = use_refind

        self.tracked = []
        self.lost = []
        self.removed = []

        self.motion = KalmanFilter()

        self.identifier = Identifier().load()
        self.classifier = Classifier().load()

        self.frame = 0

    def update(self, image: np.ndarray, boxes: np.ndarray, scores: np.ndarray) \
            -> Iterable[trace.Trace]:
        self.frame += 1

        refind, lost = [], []
        activated, removed = [], []
        # Step 1. Prediction
        for track in chain(self.tracked, self.lost):
            track.predict()

        # Step 2. Selection by score
        if scores is None:
            scores = np.ones(np.size(boxes, 0), dtype=float)

        detections = list(chain(
            map(lambda t: trace.Trace(*t, from_det=True), zip(boxes, scores)),
            map(lambda t: trace.Trace(*t, from_det=False), zip(boxes, scores))
        ))

        self.classifier.update(image)

        detections.extend(map(lambda t: trace.Trace(t.tracking(image), t.track_score, from_det=True),
                              filter(lambda t: t.is_activated, chain(self.tracked, self.lost))))

        rois = np.asarray(list(map(lambda t: t.to_tlbr, detections)), np.float32)

        class_scores = self.classifier.predict(rois)
        scores = np.concatenate([
            np.ones(np.size(boxes, 0), dtype=np.float32),
            np.fromiter(map(lambda t: t.score, detections[np.size(boxes, 0):]), dtype=np.float32)
        ]) * class_scores

        # Non-maxima suppression
        if len(detections) > 0:
            mask = np.zeros(np.size(rois, 0), dtype=bool)
            mask[list(nms(rois, scores.reshape(-1), threshold=.4))] = True

            indices = np.zeros_like(detections, dtype=bool)
            indices[np.where(mask & (scores >= self.min_score))] = True

            detections = list(compress(detections, indices))
            scores = scores[indices]

            for detection, score in zip(detections, scores):
                detection.score = score

        predictions = list(filter(lambda t: not t.from_det, detections))
        detections = list(filter(lambda t: t.from_det, detections))

        # set features
        features = self.identifier.extract(image, np.asarray(
            list(map(lambda t: t.to_tlbr, detections)), dtype=np.float32)
        )

        for idx, detection in enumerate(detections):
            detection.feature = features[idx]

        # Step3. Association for tracked
        # matching for tracked target
        unconfirmed = list(filter(lambda t: not t.is_activated, self.tracked))
        tracked = list(filter(lambda t: t.is_activated, self.tracked))

        distance = matching.nearest_distance(tracked, detections, metric='euclidean')
        cost = matching.gate_cost(self.motion, distance, tracked, detections)
        matches, u_track, u_detection = matching.assignment(cost, threshold=self.min_dist)

        for track, det in matches:
            tracked[track].update(self.frame, image, detections[det])

        # matching for missing targets
        detections = list(map(lambda u: detections[u], u_detection))
        distance = matching.nearest_distance(self.lost, detections, metric='euclidean')
        cost = matching.gate_cost(self.motion, distance, self.lost, detections)
        matches, u_lost, u_detection = matching.assignment(cost, threshold=self.min_dist)

        for miss, det in matches:
            self.lost[miss].reactivate(self.frame, image, detections[det], reassign=not self.use_refind)
            refind.append(self.lost[miss])

        # remaining tracked
        matched_size = len(u_detection)
        detections = list(map(lambda u: detections[u], u_detection)) + predictions
        u_tracked = list(map(lambda u: tracked[u], u_track))
        distance = matching.iou_distance(u_tracked, detections)
        matches, u_track, u_detection = matching.assignment(distance, threshold=.8)

        for track, det in matches:
            u_tracked[track].update(self.frame, image, detections[det], update_feature=True)

        for track in map(lambda u: u_tracked[u], u_track):
            track.lost()
            lost.append(track)

        # unconfirmed
        detections = list(map(lambda u: detections[u], filter(lambda u: u < matched_size, u_detection)))
        distance = matching.iou_distance(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = matching.assignment(distance, threshold=.8)

        for track, det in matches:
            unconfirmed[track].update(self.frame, image, detections[det], update_feature=True)

        for track in map(lambda u: unconfirmed[u], u_unconfirmed):
            track.remove()
            removed.append(track)

        # Step 4. Init new trace
        for track in filter(lambda t: t.from_det and t.score >= .6,
                            map(lambda u: detections[u], u_detection)):
            track.activate(self.frame, image, self.motion)
            activated.append(track)

        # Step 5. Update state
        for track in filter(lambda t: self.frame - t.frame > self.max_lost, self.lost):
            track.remove()
            removed.append(track)

        self.tracked = list(chain(
            filter(lambda t: t.state == trace.State.Tracked, self.tracked),
            activated, refind,
        ))
        self.lost = list(chain(
            filter(lambda t: t.state == trace.State.Lost, self.lost),
            lost
        ))
        self.removed.extend(removed)

        lost_score = self.classifier.predict(
            np.asarray(list(map(lambda t: t.to_tlbr, self.lost)), dtype=np.float32)
        )

        return chain(
            filter(lambda t: t.is_activated, self.tracked),
            map(lambda it: it[1],
                filter(lambda it: lost_score[it[0]] > .3 and self.frame - it[1].frame <= 4,
                       enumerate(self.lost)))
        )
def main():
	#TODO: Get args
	# python3 train_fixmatch.py --checkpoint-path ./checkpoint_path/model.pth --batch-size 1 --num-epochs 1 --num-steps 1 --train-from-start 1 --dataset-folder ./dataset
	parser = argparse.ArgumentParser()
	parser.add_argument('--checkpoint-path', type=str, default= "./checkpoints/model_fm_transfer.pth.tar")
	parser.add_argument('--transfer-path', type=str, default= "./checkpoints/model_transfer.pth.tar")
	parser.add_argument('--best-path', type= str, default= "./checkpoints/model_barlow_best.pth.tar")
	parser.add_argument('--batch-size', type=int, default= 64)
	parser.add_argument('--num-epochs', type=int, default= 10)
	parser.add_argument('--num-steps', type=int, default= 10)
	parser.add_argument('--train-from-start', type= int, default= 1)
	parser.add_argument('--dataset-folder', type= str, default= "./dataset")
	parser.add_argument('--new-dataset-folder', type= str, default= "./dataset")
	parser.add_argument('--learning-rate', type = float, default= 0.01)
	parser.add_argument('--threshold', type = float, default= 0.5)
	parser.add_argument('--mu', type= int, default= 7)
	parser.add_argument('--lambd', type= int, default= 1)
	parser.add_argument('--momentum', type= float, default= 0.9)
	parser.add_argument('--weight-decay', type= float, default= 0.001)
	parser.add_argument('--layers', type= int, default= 18)
	parser.add_argument('--fine-tune', type= int, default= 1)
	parser.add_argument('--new-data', type= int, default= 0)
	args = parser.parse_args()

	dataset_folder = args.dataset_folder
	batch_size_labeled = args.batch_size
	mu = args.mu
	batch_size_unlabeled = mu * args.batch_size
	batch_size_val = 256 #5120
	n_epochs = args.num_epochs
	n_steps = args.num_steps
	num_classes = 800
	threshold = args.threshold
	learning_rate = args.learning_rate
	momentum = args.momentum
	lamd = args.lambd
	tau = 0.95
	weight_decay = args.weight_decay
	checkpoint_path = args.checkpoint_path
	train_from_start = args.train_from_start
	n_layers = args.layers

	if torch.cuda.is_available():
		device = torch.device("cuda")
	else:
		device = torch.device("cpu")

	# print("pwd: ", os.getcwd())
	train_transform, val_transform = get_transforms()

	if args.new_data == 0:
		labeled_train_dataset = CustomDataset(root= args.dataset_folder, split = "train", transform = train_transform)
	else:
		labeled_train_dataset = CustomDataset(root= args.new_dataset_folder, split = "train_new", transform = train_transform)
	# labeled_train_dataset = CustomDataset(root= dataset_folder, split = "train", transform = train_transform)
	unlabeled_train_dataset = CustomDataset(root= dataset_folder, 
											split = "unlabeled", 
											transform = TransformFixMatch(mean = 0, std = 0))#TODO
											
	val_dataset = CustomDataset(root= dataset_folder, split = "val", transform = val_transform)

	labeled_train_loader = DataLoader(labeled_train_dataset, batch_size= batch_size_labeled, shuffle= True, num_workers= 4)
	unlabeled_train_loader = DataLoader(unlabeled_train_dataset, batch_size= batch_size_unlabeled, shuffle= True, num_workers= 4)
	val_loader = DataLoader(val_dataset, batch_size= batch_size_val, shuffle= False, num_workers= 4)



	labeled_iter = iter(labeled_train_loader)
	unlabeled_iter = iter(unlabeled_train_loader)


	model = wide_resnet50_2(pretrained=False, num_classes = 800)
	classifier = Classifier(ip= 2048, dp = 0)
	start_epoch = 0

	checkpoint = torch.load(args.transfer_path, map_location= device)
	model.load_state_dict(checkpoint['model_state_dict'])
	classifier.load_state_dict(checkpoint['classifier_state_dict'])

	param_groups = [dict(params=classifier.parameters(), lr=args.learning_rate)]

	if args.fine_tune:
		param_groups.append(dict(params=model.parameters(), lr=args.learning_rate))

	optimizer = torch.optim.SGD(param_groups, 
								lr = learning_rate,
								momentum= momentum,
								nesterov= True,
								weight_decay= weight_decay)

	scheduler = get_cosine_schedule_with_warmup(optimizer, 0, num_training_steps= n_epochs * n_steps)

	if torch.cuda.device_count() > 1:
		print("Let's use", torch.cuda.device_count(), "GPUs!")
		model = torch.nn.DataParallel(model)
		classifier = torch.nn.DataParallel(classifier)

	if train_from_start == 0:
		assert os.path.isfile(checkpoint_path), "Error: no checkpoint directory found!"
		print("Restoring model from checkpoint")
		# args.out = os.path.dirname(args.resume)
		checkpoint = torch.load(checkpoint_path)
		# best_acc = checkpoint['best_acc']
		start_epoch = checkpoint['epoch'] - 1
		model.load_state_dict(checkpoint['backbone_state_dict'])
		classifier.load_state_dict(checkpoint['classifier_state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer'])
		scheduler.load_state_dict(checkpoint['scheduler'])

	model = model.to(device)
	classifier = classifier.to(device)
	

	model.train()
	losses = Average()
	losses_l = Average()
	losses_u = Average()
	mask_probs = Average()
	best_val_accuracy = 25.0 #TODO

	for epoch in tqdm(range(start_epoch, n_epochs)):
		if args.fine_tune:
			model.train()
			classifier.train()
		else:
			model.eval()
			classifier.train()

		for batch_idx in tqdm(range(n_steps)):
			try:
				img_lab, targets_lab = next(labeled_iter)
			except StopIteration:
				labeled_iter = iter(labeled_train_loader)
				img_lab, targets_lab = next(labeled_iter)

			try:
				unlab, _ = next(unlabeled_iter)
				img_weak = unlab[0]
				img_strong = unlab[1]
			except StopIteration:
				unlabeled_iter = iter(unlabeled_train_loader)
				unlab, _ = next(unlabeled_iter)
				img_weak = unlab[0]
				img_strong = unlab[1]
			
			img_lab = img_lab.to(device)
			targets_lab = targets_lab.to(device)
			img_weak = img_weak.to(device)
			img_strong = img_strong.to(device)

			img_cat = torch.cat((img_lab, img_weak, img_strong), dim = 0)
			logits_cat = classifier(model(img_cat))
			logits_lab = logits_cat[:batch_size_labeled]
			# print(logits_lab.size())
			logits_unlab = logits_cat[batch_size_labeled:]
			# print(logits_unlab)

			logits_weak, logits_strong = torch.chunk(logits_unlab, chunks= 2, dim = 0)

			pseudo_label = torch.softmax(logits_weak.detach()/tau, dim= 1)
			max_probs, targets_unlab = torch.max(pseudo_label, dim= 1)
			mask = max_probs.ge(threshold).float()
			
			loss_labeled = F.cross_entropy(logits_lab, targets_lab, reduction='mean')

			# print("CE: ", F.cross_entropy(logits_strong, targets_unlab, reduction= 'none').size())

			loss_unlabeled = (F.cross_entropy(logits_strong, targets_unlab, reduction= 'none') * mask).mean()

			# print("Loss labelled, loss unlabelled: ", loss_labeled, loss_unlabeled)

			loss_total = loss_labeled + lamd * loss_unlabeled

			# print("Total loss: ", loss_total)
			# loss_epoch += loss_total
			# loss_lab_epoch += loss_labeled
			# loss_unlab_epoch += loss_unlabeled
			losses.update(loss_total.item())
			losses_l.update(loss_labeled.item())
			losses_u.update(loss_unlabeled.item())
			mask_probs.update(mask.mean().item())

			optimizer.zero_grad()
			loss_total.backward()
			optimizer.step()
			scheduler.step()


			# break
			if batch_idx % 25 == 0:
				print(f"Epoch number: {epoch}, loss: {losses.avg}, loss lab: {losses_l.avg}, loss unlab: {losses_u.avg}, mask: {mask_probs.avg}, loss_here: {loss_total.item()}, best accuracy: {best_val_accuracy:.2f}", flush= True)
			# print(optimizer.param_groups[0]['lr'])
		

		save_checkpoint({
				'epoch': epoch + 1,
				'model_state_dict': model.state_dict(),
				'classifier_state_dict': classifier.state_dict(),
				'optimizer': optimizer.state_dict(),
				'scheduler': scheduler.state_dict(),
			}, checkpoint_path)

		model.eval()
		classifier.eval()
		with torch.no_grad():
			val_loss = 0
			val_size = 0
			total = 0
			correct = 0
			for batch in val_loader:
				logits_val = classifier(model(batch[0].to(device)))
				labels = batch[1].to(device)
				val_loss += F.cross_entropy(logits_val, labels)
				_, predicted = torch.max(logits_val.data, 1)
				total += labels.size(0)
				correct += (predicted == labels).sum().item()
				val_size += 1
				# break
		print(f"Val loss: {val_loss/val_size}, Accuracy: {(100 * correct / total):.2f}%", flush= True)
		if 100 * correct / total > best_val_accuracy:
			best_val_accuracy = 100 * correct / total
			best_val_loss = val_loss/val_size
			print(f"Saving the best model with {best_val_accuracy:.2f}% accuracy and {best_val_loss:.2f} loss", flush= True)
			save_checkpoint({
				'epoch': epoch + 1,
				'model_state_dict': model.state_dict(),
				'classifier_state_dict': classifier.state_dict(),
				'optimizer': optimizer.state_dict(),
				'scheduler': scheduler.state_dict(),
				'best_val_accuracy': best_val_accuracy,
				'best_val_loss': best_val_loss
			}, args.best_path)
		model.train()
		classifier.train()
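
The unlabeled branch of the training loop above follows the standard FixMatch recipe; a standalone hedged sketch of that masked consistency term (the temperature division applied to the weak logits above is omitted here):
import torch
import torch.nn.functional as F

def fixmatch_unlabeled_loss(logits_weak, logits_strong, threshold=0.95):
    # pseudo-labels come from the weakly augmented view, with gradients detached
    probs = torch.softmax(logits_weak.detach(), dim=1)
    max_probs, targets = probs.max(dim=1)
    mask = max_probs.ge(threshold).float()  # keep only confident pseudo-labels
    return (F.cross_entropy(logits_strong, targets, reduction='none') * mask).mean()
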
Example #31
    return torch.optim.Adam(parameters, lr)

device = current_device()

n_features = 256
latent_dim = 1
latent_dim_cont = 0
categorical_dim = 10
batch_size = 100
lr = 1e-4
n_epochs = 100

dataloader, _, _ = get_data(batch_size)

models = {
    'classifier': Classifier(latent_dim, categorical_dim),
}

for model in models.values():
    model.to(device)
    model.apply(weights_init_normal)

optimizers = {
    'all': create_optimizer([
        models['classifier'],
    ], lr)
}

losses = {
    'info': loss.MutualInformationLoss(latent_dim, categorical_dim),
}
            # Validate classifier
            loss = classifier.validate(z.detach().cpu().numpy(), labels.numpy())
            classifier_loss += loss * z.shape[0]

    classifier_loss = classifier_loss / len(dataloader.dataset)

    test_loss = running_loss / len(dataloader.dataset)
    return test_loss, classifier_loss


train_loss = []
test_loss = []
classifierloss = []

for epoch in range(epochs):
    print(f"Epoch {epoch + 1} of {epochs}")
    classifier = Classifier()
    train_epoch_loss = train(model, classifier, train_loader)
    test_epoch_loss, classifier_epoch_loss = test(model, classifier, test_loader)
    train_loss.append(train_epoch_loss)
    test_loss.append(test_epoch_loss)
    classifierloss.append(classifier_epoch_loss)
    print(f"Train Loss: {train_epoch_loss:.4f}")
    print(f"Val Loss: {test_epoch_loss:.4f}")
    print(f"Classifier accuracy: {classifier_epoch_loss:.4f}")

print(train_loss)
print(test_loss)
print(classifierloss)
Example #33
File: run.py Project: pmiller10/lshtc
from data import CompressedData
from models.classifier import Classifier
from sklearn.linear_model import LogisticRegression
from score import score
import time
import sys

limit = int(sys.argv[1])
data, targets = CompressedData.data(limit=limit)
tr_data, tr_targets = data[:limit/2], targets[:limit/2]
cv_data, cv_targets = data[limit/2:], targets[limit/2:]

logistic = Classifier(LogisticRegression())
start = time.time()
logistic.train(tr_data, tr_targets) 
print 'duration ', time.time() - start

preds = [logistic.predict(c) for c in cv_data]
print score(preds, cv_targets)