Code example #1
# Note: generate_dataset is the project-local helper this page documents.
# KMeans's n_jobs argument was deprecated in scikit-learn 0.23 and removed
# in 1.0, so this snippet assumes an older scikit-learn.
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score


def main(data_path):
    dataset, labels = generate_dataset(data_path)

    pred = KMeans(n_clusters=2, init='random', n_init=5,
                  n_jobs=2).fit_predict(dataset)

    acc = accuracy_score(labels, pred)
    print("Accuracy: {:.3f}%".format(acc * 100))
Code example #2
File: main.py Project: 329tyson/KD_pytorch
    if args.pretrain_path != 'NONE':
        args.pretrain_path = os.path.join(args.root, args.pretrain_path)
    if args.sr_pretrain_path != 'NONE':
        args.sr_pretrain_path = os.path.join(args.root, args.sr_pretrain_path)

    if args.dataset.lower() == 'cub':
        args.classes = 200
    else:
        args.classes = 196

    low_img_need = args.fsr_enabled or args.kd_enabled or args.sr_enabled or args.adapter_loss
    try:
        train_loader, eval_train_loader, eval_validation_loader, num_training, num_validation = generate_dataset(
            args.dataset,
            args.batch,
            args.annotation_train,
            args.annotation_val,
            args.data,
            args.low_ratio,
            args.ten_batch_eval,
            args.verbose,
            low_img_need)
    except ValueError:
        print('inappropriate dataset, please use cub or stanford')
        exit()

    if args.vgg_gap:
        vgg16 = models.vgg16(pretrained=True)
        net = VGG_gap(vgg16, args.classes)

        if args.student_pretrain_path != 'NONE':
            net.load_state_dict(torch.load(args.student_pretrain_path))
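
VGG_gap is a project-local class from 329tyson/KD_pytorch. For illustration only, a hypothetical GAP-style head over VGG16 features might look like the following; VGGGapSketch and its layer sizes are assumptions, not the repo's actual implementation:

import torch.nn as nn
from torchvision import models

class VGGGapSketch(nn.Module):
    def __init__(self, vgg16, num_classes):
        super().__init__()
        self.features = vgg16.features        # VGG16 conv backbone (512 output channels)
        self.gap = nn.AdaptiveAvgPool2d(1)    # global average pooling
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        x = self.gap(self.features(x)).flatten(1)
        return self.fc(x)

net = VGGGapSketch(models.vgg16(pretrained=True), num_classes=200)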
Code example #3
            train_original_data, num_timesteps_input, num_timesteps_output)
        val_input, val_target = get_decomp_dataset(val_original_data,
                                                   num_timesteps_input,
                                                   num_timesteps_output)
        test_input, test_target = get_decomp_dataset(test_original_data,
                                                     num_timesteps_input,
                                                     num_timesteps_output)
    elif args.denosing == "change":
        training_input, training_target = get_change_point_dataset_parall(
            train_original_data, num_timesteps_input, num_timesteps_output)
        val_input, val_target = get_change_point_dataset_parall(
            val_original_data, num_timesteps_input, num_timesteps_output)
        test_input, test_target = get_change_point_dataset_parall(
            test_original_data, num_timesteps_input, num_timesteps_output)
    else:
        training_input, training_target = generate_dataset(
            train_original_data, num_timesteps_input, num_timesteps_output)
        val_input, val_target = generate_dataset(val_original_data,
                                                 num_timesteps_input,
                                                 num_timesteps_output)
        test_input, test_target = generate_dataset(test_original_data,
                                                   num_timesteps_input,
                                                   num_timesteps_output)

    print(training_input.shape, training_target.shape)
    print(val_input.shape, val_target.shape)
    print(test_input.shape, test_target.shape)
    '''
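    # (the lines above, back to the matching opening quotes, sit inside a
    #  triple-quoted block in the original file, i.e. they are commented out;
    #  the live generate_dataset call follows)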

    training_input, training_target = generate_dataset(train_original_data,
                                                       num_timesteps_input=num_timesteps_input,
                                                       num_timesteps_output=num_timesteps_output)
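
All three branches above share the same windowing call shape. For orientation, here is a hedged sketch of what a sliding-window generate_dataset of this kind typically does, assuming X has shape (num_nodes, num_features, num_timesteps) as in the STGCN-style loaders; the real implementations live in the respective repos and may normalize or order axes differently:

import numpy as np
import torch

def generate_dataset_sketch(X, num_timesteps_input, num_timesteps_output):
    inputs, targets = [], []
    last_start = X.shape[2] - (num_timesteps_input + num_timesteps_output) + 1
    for i in range(last_start):
        # Input window: all features over num_timesteps_input steps,
        # reordered to (num_nodes, time, features).
        inputs.append(X[:, :, i:i + num_timesteps_input].transpose(0, 2, 1))
        # Target window: the first feature over the next num_timesteps_output steps.
        j = i + num_timesteps_input
        targets.append(X[:, 0, j:j + num_timesteps_output])
    return (torch.from_numpy(np.array(inputs)),
            torch.from_numpy(np.array(targets)))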
Code example #4
###################################################################################################
# Step 4: Vectorize data using preprocess.py
###################################################################################################

# Locals
from gnn import get_trainer
from preprocess import generate_dataset

# Externals
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

train_dataset, train_labels, train_True_Ri, train_True_Ro = generate_dataset(
    TrainingDataSets)
test_dataset, test_labels, test_True_Ri, test_True_Ro = generate_dataset(
    TestingDataSets)

train_data_loader = DataLoader(train_dataset, batch_size=BatchSize)
valid_data_loader = DataLoader(test_dataset, batch_size=BatchSize)

###################################################################################################
# Step 5: Setting up the neural network
###################################################################################################

# trainer = get_trainer(distributed=args.distributed, output_dir=output_dir,
#                           device=args.device, **experiment_config)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Using", "cuda:0" if torch.cuda.is_available() else "cpu",
      "for training.")
Code example #5
File: main_task.py Project: Naplessss/scalable_graph
    def init_data(self, data_dir=None):
        if data_dir is None:
            data_dir = self.config.data_dir

        if self.config.dataset == "metr":
            A, X, means, stds = load_metr_la_data(data_dir)
        elif self.config.dataset == 'nyc-bike':
            A, X, means, stds = load_nyc_sharing_bike_data(data_dir)
        elif self.config.dataset == 'sf-example':
            edge_index, edge_weight, X, shenzhenmask = load_sf_sample_data(
                data_dir)
            self.node_mask = shenzhenmask

        X = X.astype(np.float32)
        split_line1 = int(X.shape[2] * 0.2)
        split_line2 = int(X.shape[2] * 0.4)
        train_original_data = X[:, :, :split_line1]
        val_original_data = X[:, :, split_line1:split_line2]
        test_original_data = X[:, :, split_line2:]

        self.training_input, self.training_target = generate_dataset(
            train_original_data,
            num_timesteps_input=self.config.num_timesteps_input,
            num_timesteps_output=self.config.num_timesteps_output)
        self.val_input, self.val_target = generate_dataset(
            val_original_data,
            num_timesteps_input=self.config.num_timesteps_input,
            num_timesteps_output=self.config.num_timesteps_output)
        self.test_input, self.test_target = generate_dataset(
            test_original_data,
            num_timesteps_input=self.config.num_timesteps_input,
            num_timesteps_output=self.config.num_timesteps_output)

        if self.config.dataset in ['metr', 'nyc-bike']:
            self.A = torch.from_numpy(A)
            self.sparse_A = self.A.to_sparse()
            self.edge_index = self.sparse_A._indices()
            self.edge_weight = self.sparse_A._values()

            # set config attributes for model initialization
            self.config.num_nodes = self.A.shape[0]
            self.config.num_edges = self.edge_weight.shape[0]
            self.config.num_edge_features = 1
            self.config.num_features = self.training_input.shape[3]
            self.log('Total nodes: {}'.format(self.config.num_nodes))
            self.log('Average degree: {:.3f}'.format(self.config.num_edges /
                                                     self.config.num_nodes))

            contains_self_loops = torch_geometric.utils.contains_self_loops(
                self.edge_index)
            self.log('Contains self loops: {}, but we add them.'.format(
                contains_self_loops))
            if not contains_self_loops:
                self.edge_index, self.edge_weight = torch_geometric.utils.add_self_loops(
                    self.edge_index,
                    self.edge_weight,
                    num_nodes=self.config.num_nodes)

        elif self.config.dataset in ['sf-example']:
            self.edge_index = torch.from_numpy(edge_index)
            self.edge_weight = torch.from_numpy(edge_weight)

            # set config attributes for model initialization
            self.config.num_nodes = len(shenzhenmask)
            self.config.num_edges = self.edge_weight.shape[0]
            self.config.num_edge_features = self.edge_weight.shape[1]
            self.config.num_features = self.training_input.shape[3]
            self.log('Total nodes: {}'.format(self.config.num_nodes))
            self.log('Average degree: {:.3f}'.format(self.config.num_edges /
                                                     self.config.num_nodes))
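
The _indices()/_values() calls above rely on private sparse-tensor accessors; torch_geometric exposes a public helper for the same conversion, sketched here as a drop-in line inside init_data:

# Hedged alternative to the private _indices()/_values() accessors used above:
# torch_geometric.utils.dense_to_sparse performs the same dense-to-COO conversion.
edge_index, edge_weight = torch_geometric.utils.dense_to_sparse(self.A)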
Code example #6
    for param_group in optimizer.param_groups:
        lr = param_group['lr']
        lr = lr * (GAMMA ** (epoch // DECAY_PERIOD))
        param_group['lr'] = lr
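    # Caution: this scales the *current* lr by GAMMA ** (epoch // DECAY_PERIOD),
    # so calling it every epoch compounds the decay. The built-in
    # torch.optim.lr_scheduler.StepLR applies the same step schedule from a
    # fixed base lr:
    #   scheduler = StepLR(optimizer, step_size=DECAY_PERIOD, gamma=GAMMA)
    #   scheduler.step()  # once per epoch, after optimizer.step()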

writer = SummaryWriter()

# net = alexnet.RACNN(0.5, 200, ['fc8'], 'bvlc_alexnet.npy', True, './models/sr_50_0.4_0.1_0.0_0.0.pth')
net = alexnet.RACNN(0.5, 200, ['fc8'], 'bvlc_alexnet.npy', True)

train_loader, eval_train_loader, eval_validation_loader, num_training, num_validation = generate_dataset(
                    args.dataset,
                    args.batch,
                    args.annotation_train,
                    args.annotation_val,
                    args.data,
                    args.low_ratio,
                    args.ten_batch_eval,
                    args.verbose,
                    True)   # To get image & low image, set args.kd_enabled = True
                    # args.kd_enabled)

net.cuda()

MSE_loss = nn.MSELoss()
CE_loss = nn.CrossEntropyLoss()

# optimizer = optim.SGD([{'params': net.get_all_params_except_last_fc(), 'lr': 0.1, 'weight_decay': 0},
#                        {'params': net.classificationLayer.fc8.parameters(), 'lr':1.0, 'weight_decay': 1.0}],
#                       momentum=args.momentum, weight_decay=args.weight_decay)
"""
Code example #7
    print("gcn package:", args.gcn_package)
    print("gcn partition:", args.gcn_partition)
    torch.manual_seed(7)

    if args.dataset == "metr":
        A, X, means, stds = load_metr_la_data()
    elif args.dataset == "nyc":
        A, X, means, stds = load_nyc_sharing_bike_data()
    elif args.dataset == "pems":
        A, X, means, stds = load_pems_d7_data()
    elif args.dataset == "pems-m":
        A, X, means, stds = load_pems_m_data()

    X, y = generate_dataset(X,
                            num_timesteps_input=args.num_timesteps_input,
                            num_timesteps_output=args.num_timesteps_output,
                            dataset=args.dataset)

    split_line1 = int(X.shape[0] * 0.6)
    split_line2 = int(X.shape[0] * 0.8)

    training_input, training_target = X[:split_line1], y[:split_line1]
    val_input, val_target = X[split_line1:split_line2], y[split_line1:split_line2]
    test_input, test_target = X[split_line2:], y[split_line2:]

    A = torch.from_numpy(A)
    sparse_A = A.to_sparse()
    edge_index = sparse_A._indices()
    edge_weight = sparse_A._values()
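
Unlike Example #5, which slices the raw timeline before windowing, this snippet windows first and then splits along the sample axis, so windows on either side of a split boundary share timesteps between train, val, and test. A hedged sketch of the split-first ordering, using the three-argument generate_dataset signature from Examples #3 and #5 (X_raw is a hypothetical name for the pre-windowing array of shape (num_nodes, num_features, num_timesteps)):

# Hedged sketch: split the raw timeline first, then window each slice,
# so train/val/test windows never share timesteps. X_raw is hypothetical.
split1 = int(X_raw.shape[2] * 0.6)
split2 = int(X_raw.shape[2] * 0.8)
training_input, training_target = generate_dataset(
    X_raw[:, :, :split1], args.num_timesteps_input, args.num_timesteps_output)
val_input, val_target = generate_dataset(
    X_raw[:, :, split1:split2], args.num_timesteps_input, args.num_timesteps_output)
test_input, test_target = generate_dataset(
    X_raw[:, :, split2:], args.num_timesteps_input, args.num_timesteps_output)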