def predict(opt):
    # net = torch.load('Lenet.pth')  # .pth file holding only the parameters
    # print('net', net)
    '''
    is_train in basic_option needs to be set to False before running prediction.
    '''
    # opt.is_train = False
    acc = 0
    total = 0
    test_dataloader = create_dataloader(opt)
    net = Classification()
    net.load_state_dict(
        torch.load(
            "./output/train/weights/exp_1/Basic_Epoch_20_Accuracy_0.99.pth"))
    net = net.to(device)
    net.eval()  # disable dropout/batch-norm updates for inference
    with torch.no_grad():
        for index, data in enumerate(test_dataloader, start=1):
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, dim=1)
            print(f'picture {index} predicted as: {classes[predicted[0]]}')
            total += labels.size(0)
            acc += (predicted == labels).sum().item()
    print('Accuracy on test set: {:.2f}%'.format(100 * acc / total))
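The predict() snippet above relies on module-level device and classes objects; a minimal sketch of what it assumes (the label names here are placeholders, not from the original):

import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')  # placeholder label names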
Example #2
    def __init__(self,
                 adj_matrix,
                 features=None,
                 labels=None,
                 supervised=False,
                 model='gat',
                 n_layer=2,
                 emb_size=128,
                 random_state=1234,
                 device='auto',
                 epochs=5,
                 batch_size=20,
                 sample_size=10,
                 lr=0.7,
                 unsup_loss_type='margin',
                 print_progress=True):
        super(GNN, self).__init__()
        # fix random seeds
        random.seed(random_state)
        np.random.seed(random_state)
        torch.manual_seed(random_state)
        torch.cuda.manual_seed_all(random_state)
        # set parameters
        self.supervised = supervised
        self.lr = lr
        self.epochs = epochs
        self.batch_size = batch_size
        self.sample_size = sample_size
        self.unsup_loss_type = unsup_loss_type
        self.print_progress = print_progress
        self.gat = (model == 'gat')
        # set device
        if device == 'auto':
            self.device = torch.device(
                "cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device

        # load data
        self.dl = DataLoader(adj_matrix, features, labels, supervised,
                             self.device)

        self.gnn = GraphSage(n_layer,
                             emb_size,
                             batch_size,
                             sample_size,
                             self.dl,
                             self.device,
                             gat=self.gat)
        self.gnn.to(self.device)

        if supervised:
            n_classes = len(set(labels))
            self.classification = Classification(emb_size, n_classes)
            self.classification.to(self.device)
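A hedged usage sketch for the constructor above; the toy graph is hypothetical and only illustrates the expected argument shapes (the exact adjacency format is whatever the custom DataLoader accepts):

import numpy as np

adj = (np.random.rand(100, 100) > 0.9).astype(int)  # toy 100-node adjacency matrix
feats = np.random.rand(100, 32).astype('float32')   # toy node features
labels = np.random.randint(0, 3, size=100)          # toy node labels
gnn = GNN(adj, features=feats, labels=labels, supervised=True,
          model='gat', n_layer=2, emb_size=128, epochs=5)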
Example #3
def train(opt):

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    train_dataloader, val_dataloader = create_dataloader(opt)
    net = Classification()  # instantiate the network to train
    net.to(device)
    net.train()
    loss_function = nn.CrossEntropyLoss()  # cross-entropy loss
    optimizer = optim.Adam(net.parameters(), lr=0.001)  # optimizer over the trainable parameters

    for epoch in range(opt.num_epochs):  # one epoch = one full pass over the training set
        running_loss = 0.0
        correct = 0
        total = 0
        time_start = time.perf_counter()

        for step, data in enumerate(train_dataloader,
                                    start=0):  # iterate over the training set, counting steps from 0
            inputs, labels = data  # unpack the batch of images and labels
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()  # clear accumulated gradients

            # forward + backward + optimize
            # outputs = net(inputs.permute(0,1,3,2))  # forward pass (transposed variant)
            outputs = net(inputs)  # forward pass
            # print('outputs.shape', outputs.shape, labels.shape)  # debug only
            loss = loss_function(outputs, labels)  # compute the loss
            loss.backward()  # backward pass
            optimizer.step()  # update the parameters
            predict_y = torch.max(outputs, dim=1)[1]
            total += labels.size(0)
            correct += (predict_y == labels).sum().item()
            running_loss += loss.item()
            # print statistics

            # print('train_dataloader length: ', len(train_dataloader))
        acc = correct / total
        avg_loss = running_loss / len(train_dataloader)  # mean loss per batch
        print('Train on epoch {}: loss: {:.4f}, acc: {:.2f}%'.format(
            epoch + 1, avg_loss, 100 * acc))
        # save the trained parameters
        if opt.model == 'basic':
            save_weight_name = os.path.join(opt.save_path,
                                            'Basic_Epoch_{0}_Accuracy_{1:.2f}.pth'.format(
                                                epoch + 1,
                                                acc))
        elif opt.model == 'plus':
            save_weight_name = os.path.join(opt.save_path,
                                            'Plus_Epoch_{0}_Accuracy_{1:.2f}.pth'.format(
                                                epoch + 1,
                                                acc))
        torch.save(net.state_dict(), save_weight_name)
    print('Finished Training')
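train() only reads opt.num_epochs, opt.model and opt.save_path directly (plus whatever create_dataloader consumes); a minimal hypothetical option object for it:

from types import SimpleNamespace

opt = SimpleNamespace(num_epochs=20, model='basic',
                      save_path='./output/train/weights/exp_1')
train(opt)  # assumes create_dataloader(opt) can build loaders from these fields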
Example #4
def create_models(samples, dictionary):

    neumf = NeuMF(len(samples), len(dictionary), 0.5, 8, [64, 32, 16, 8])
    attr_nets = dict()
    for key, labels in dictionary.items():
        if labels is None:
            # regression;
            attr_nets[key] = Regression(16)
        else:
            # classification
            # NOTE: class num doesn't include blank labels
            attr_nets[key] = Classification(16,
                                            len(labels) - 1)
    return neumf, attr_nets
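A usage sketch for create_models(); the attribute dictionary below is hypothetical, with None marking regression targets and label lists (including a blank entry) marking classification targets, as the loop above implies:

samples = list(range(1000))                 # toy interaction samples
dictionary = {
    'age':   None,                          # None -> Regression head
    'genre': ['', 'rock', 'jazz', 'pop'],   # labels including a blank entry
}
neumf, attr_nets = create_models(samples, dictionary)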
Example #5
    def add_classification(self):

        text = self.classificationNameInput.text()

        same_classifications = self.session.query(Classification).filter_by(
            name=text).all()

        if same_classifications:  # a classification with this name already exists
            self.close()
            QMessageBox.warning(self, 'تحذير', 'التصنيف موجود بالفعل!')  # "Warning": "Classification already exists!"
            return

        classification = Classification(name=text)

        self.session.add(classification)
        self.session.commit()

        QMessageBox.information(self, 'عملية ناجحة', 'تمت الاضافة بنجاح')  # "Success": "Added successfully"
        self.close()
Example #6
        x = self.features(x)
        x = self.output(x)
        return x


mnist_train = gdata.vision.FashionMNIST(train=True,
                                        root=r'../resource/fashion')
mnist_test = gdata.vision.FashionMNIST(train=False,
                                       root=r'../resource/fashion')

transform = gdata.vision.transforms.ToTensor()
train_iter = gdata.DataLoader(dataset=mnist_train.transform_first(transform),
                              shuffle=True,
                              batch_size=128)
test_iter = gdata.DataLoader(mnist_test.transform_first(transform),
                             batch_size=128)

if __name__ == '__main__':
    ctx = mx.gpu()
    net = Net(classes=10)
    net.initialize(ctx=ctx)
    print(net)

    trainer = Trainer(net.collect_params(), 'adam', {'learning_rate': 0.01})
    fun = gloss.SoftmaxCrossEntropyLoss()

    model = Classification(neural=net, fun=fun, opt=trainer)

    model.train(mnist_train.transform_first(transform),
                batch_size=256,
                epochs=32)
Example #7
    # image size 3, 32, 32
    # batch size must be an even number
    # shuffle must be True

    cifar_10_train_dt = MyCustomDataset('data',
                                        download=True,
                                        transform=ToTensor())
    cifar_10_train_l = DataLoader(cifar_10_train_dt,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  pin_memory=torch.cuda.is_available())

    encoder = Encoder().to(device)
    loss_fn = DeepInfoMaxLoss(1, 0, 1).to(device)
    classification = Classification().to(device)
    encoder_optim = Adam(encoder.parameters(), lr=1e-4)
    loss_optim = Adam(loss_fn.parameters(), lr=1e-4)
    classification_optim = Adam(classification.parameters(), lr=1e-4)

    epoch_restart = 0
    root = Path(r'models')

    if epoch_restart > 0 and root is not None:
        enc_file = root / Path('encoder' + str(epoch_restart) + '.wgt')
        loss_file = root / Path('loss' + str(epoch_restart) + '.wgt')
        classification_loss_file = root / Path('classification_loss' +
                                               str(epoch_restart) + '.wgt')
        encoder.load_state_dict(torch.load(str(enc_file)))
        loss_fn.load_state_dict(torch.load(str(loss_file)))
        classification.load_state_dict(
            torch.load(str(classification_loss_file)))
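The restore block above implies a matching save step using the same file-naming scheme; a sketch (epoch here stands for the training-loop variable at save time):

torch.save(encoder.state_dict(), str(root / 'encoder{}.wgt'.format(epoch)))
torch.save(loss_fn.state_dict(), str(root / 'loss{}.wgt'.format(epoch)))
torch.save(classification.state_dict(),
           str(root / 'classification_loss{}.wgt'.format(epoch)))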
Example #8
    args.add_argument('--max_epoch', type=int, default=10)
    args.add_argument('--batch', type=int, default=2000)
    args.add_argument('--strmaxlen', type=int, default=200)
    args.add_argument('--embedding', type=int, default=8)

    # Select model
    args.add_argument('--model', type=str, default='classification', choices=['regression', 'classification'])
    config = args.parse_args()

    print('HAS_DATASET :', HAS_DATASET)
    print('IS_ON_NSML :', IS_ON_NSML)
    print('DATASET_PATH :', DATASET_PATH)

    model_type = {
        'regression' : Regression(config.embedding, config.strmaxlen),
        'classification' : Classification(config.embedding, config.strmaxlen),
    }

    model = model_type[config.model]
    if GPU_NUM:
        model = model.cuda()

    # DONOTCHANGE: Reserved for nsml use
    bind_model(model, config)

    criterion_type = {
        'regression' : nn.MSELoss(),
        'classification' : nn.CrossEntropyLoss(),
    }
    criterion = criterion_type[config.model]
    optimizer = optim.Adam(model.parameters(), lr=0.01)
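With model, criterion and optimizer wired up, training follows the usual loop; a sketch in which train_loader is a hypothetical DataLoader yielding (inputs, labels) batches:

for epoch in range(config.max_epoch):
    for inputs, labels in train_loader:
        if GPU_NUM:
            inputs, labels = inputs.cuda(), labels.cuda()
        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss.backward()
        optimizer.step()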
Example #9
    def parse_line(self, line):
        columns = line.split(self.HAZARDS_DELIMITER)

        # Check columns count
        if len(columns) != self.HAZARDS_COL_COUNT:
            self.error = 'Number of columns must be %d' % self.HAZARDS_COL_COUNT
            return False

        # Clean columns
        columns = [col.strip() for col in columns]

        # Check signal word
        if not HStatement.is_signal_word(columns[self.HAZARDS_COL_SIGNALWORD]):
            self.error = "'%s' is not a signal word!" % columns[
                self.HAZARDS_COL_SIGNALWORD]
            return False

        pictogram_names = columns[self.HAZARDS_COL_PICTOGRAM].strip()
        for pic_name in pictogram_names.split(','):
            pic_name = pic_name.strip()
            image_name = pic_name + self.HAZARDS_PICTOGRAM_TYPE
            image = None
            try:
                image = self.zip_file.read(image_name)
            except KeyError:
                self.error = 'Did not find <b>%s</b> in zip-file.' % image_name
                return False
            else:
                # Parse Pictogram
                pic = Pictogram.load(pic_name, image)

                # Parse H-Statement
                hstatement = HStatement.load(columns[self.HAZARDS_COL_CODE])
                hstatement.statement = columns[self.HAZARDS_COL_HSTATEMENT]
                hstatement.set_signal_word(
                    columns[self.HAZARDS_COL_SIGNALWORD])

                # Parse Class
                cls = Class.load(columns[self.HAZARDS_COL_CLASS])
                cls.pictogram = pic.key

                # Parse Category (can be a list)
                cats = self.parse_category(columns[self.HAZARDS_COL_CATEGORY])
                if cats:
                    for cat in cats:
                        c = Classification(parent=CLASSIFICATION_KEY,
                                           clazz=cls.key,
                                           category=cat.key,
                                           hstatement=hstatement.key)
                        c.put()
                        cat.put()
                else:
                    self.error = "Category '%s' could not be understood." % columns[
                        self.HAZARDS_COL_CATEGORY]
                    return False

                # Store all the entities now that everything has parsed cleanly
                pic.put()
                hstatement.put()
                cls.put()

        return True
Example #10
batch_size = 16

train_data = gdata.DataLoader(cifar_train.transform_first(transform_train),
                              batch_size=batch_size,
                              shuffle=True)

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])

val_data = gluon.data.DataLoader(cifar_test.transform_first(transform_test),
                                 batch_size=batch_size,
                                 shuffle=False)

if __name__ == '__main__':
    ctx = mx.gpu()
    net = get_model('cifar_resnet20_v1', classes=10, pretrained=True)
    net.collect_params().reset_ctx(ctx)  # pretrained weights are already initialized; just move them to ctx

    model = Classification(net=net, ctx=ctx)

    model.summary()

    history = model.fit(train_data, 1, val_data)

    history.plot()
    plt.legend()
    plt.show()
Example #11
    args.add_argument('--strmaxlen', type=int, default=200)
    args.add_argument('--embedding', type=int, default=300)
    args.add_argument('--model_name', type=str, default='RCNN')

    config = args.parse_args()
    config2 = Config()

    dataset = SentenceClassificationDataset('../data/processing_data',
                                            config.strmaxlen)

    print('unique labels = {}'.format(dataset.get_unique_labels_num()))
    print('vocab size = {}'.format(dataset.get_vocab_size()))

    if config.model_name == 'CNN':
        model = Classification(config.embedding, config.strmaxlen,
                               dataset.get_unique_labels_num(),
                               dataset.get_vocab_size())
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)

    elif config.model_name == 'RCNN':
        model = RCNN(config.embedding, config.strmaxlen,
                     dataset.get_unique_labels_num(), dataset.get_vocab_size())
        if config.mode == 'train':
            model = model.cuda()

        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)

    elif config.model_name == "DUALRCNN":
        model = DualRCNN(config.embedding, config.strmaxlen,