Example #1
import argparse

import torch

import dataset

# The excerpt starts mid-call: '--lr' is an assumed name for the option whose
# help string ('base learning rate') is all that survives here.
parser = argparse.ArgumentParser()
parser.add_argument('--lr',
                    type=float,
                    help='base learning rate')
parser.add_argument("--batch_size",
                    type=int,
                    default=32,
                    help="size of the batches")
parser.add_argument("--epoch", type=int, default=200, help="number of epochs")
parser.add_argument('--input_data_path', type=str, default='./data')

best_acc = 0
use_cuda = torch.cuda.is_available()
opt = parser.parse_args()
torch.manual_seed(opt.seed)  # '--seed' is defined in the omitted part of the argument list

shuffle = True
# Build the train/test datasets (the index-split code below is left commented out;
# data_x_train / data_y_train / data_x_test / data_y_test come from the omitted part).
data_train = dataset.CSISet(data_x_train, data_y_train)
data_test = dataset.CSISet(data_x_test, data_y_test)
# dataset_size = len(data)
# indices = list(range(dataset_size))
# split = 0.8
# split = int(np.floor(split * dataset_size))
# print(split)
# if shuffle:
#     np.random.seed(opt.seed)
#     np.random.shuffle(indices)
#train_indices, val_indices = indices[split:], indices[:split]
# train_indices, val_indices = indices[:split], indices[split:]

trainloader = dataset.CSILoader(data_train, opt, shuffle=True)
testloader = dataset.CSILoader(data_test, opt, shuffle=True)
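A quick way to sanity-check the loaders built above, assuming dataset.CSILoader yields (input, target) batches the way a standard torch DataLoader does; this check is an illustration, not part of the original example:

# Inspect one batch from each loader to confirm shapes (assumed CSILoader API).
for inputs, targets in trainloader:
    print('train batch:', inputs.shape, targets.shape)
    break
for inputs, targets in testloader:
    print('test batch:', inputs.shape, targets.shape)
    break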
Example #2
import glob
import pickle

import numpy as np

import dataset

# Data: load every pickled file, subtract its per-file mean, and stack the
# samples into one array. (opt and compute_mean come from the omitted part
# of this example.)
flag = 0
for data_file in glob.glob(r'{}/*.pkl'.format(opt.input_data_path)):
    with open(data_file, 'rb') as f:
        data = pickle.load(f)
        x_mean = compute_mean(data)
        if not flag:
            csifile = data['x'] - x_mean
            targetfile = data['y']
            flag = 1
        else:
            csifile = np.concatenate((csifile, data['x'] - x_mean))
            targetfile = np.concatenate((targetfile, data['y']))

data = dataset.CSISet(csifile, targetfile)

random_seed = 42
shuffle = False
# Creating data indices for training and validation splits:
dataset_size = len(data)
indices = list(range(dataset_size))
validation_split = 0.2
split = int(np.floor(validation_split * dataset_size))
print(split)
if shuffle:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

# Creating PT data samplers and loaders:
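The excerpt is cut off right after this comment. Below is a minimal sketch of what such a comment typically introduces, assuming CSISet is a torch Dataset and reusing the index lists computed above; the loader names and batch size are placeholders, not part of the original:

from torch.utils.data import DataLoader, SubsetRandomSampler

# Build train/validation loaders over the same dataset using the split indices.
batch_size = 32  # placeholder
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
trainloader = DataLoader(data, batch_size=batch_size, sampler=train_sampler)
validloader = DataLoader(data, batch_size=batch_size, sampler=valid_sampler)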
Example #3
# (The excerpt begins mid-loop: the imports, the loading loop, merge_ndarray,
#  and the data_x_* / data_y_* arrays are all in the omitted part of this example.)
            data_y_train = merge_ndarray(data_y_train, data['y'])

unique_train, counts_train = np.unique(data_y_train, return_counts=True)
label_counts_train = dict(zip(unique_train, counts_train))
unique_test, counts_test = np.unique(data_y_test, return_counts=True)
label_counts_test = dict(zip(unique_test, counts_test))
print(data_x_test.shape)
print('------------- Training Set Stats -------------')
print(label_counts_train)
print('------------- Testing Set Stats --------------')
print(label_counts_test)

# Build the train/test datasets and loaders:
if not opt.test_mode:
    data_train = dataset.CSISet(data_x_train,
                                data_y_train,
                                imf_s=IMF_S,
                                imf_selection=True)
    trainloader = dataset.CSILoader(data_train, opt, shuffle=True)
data_test = dataset.CSISet(data_x_test,
                           data_y_test,
                           imf_s=IMF_S,
                           imf_selection=True)
testloader = dataset.CSILoader(data_test, opt, shuffle=True)

print('==> Building model..')
# net = VGG('VGG19')
# net = vgg.VGG('VGG11', in_channels=64, num_classes=4, linear_in=1536)
net = resnet1D.ResNetCSI(num_classes=4,
                         in_channels=data_x_test.shape[2] * IMF_S)
# net = GoogLeNet()
# net = DenseNet121()
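The excerpt stops once the network is built. Below is a minimal sketch of the usual next steps (device placement, loss, optimizer, one training pass), assuming a standard PyTorch setup, that test_mode is off so trainloader exists, and that CSILoader yields (input, target) batches; the optimizer and hyperparameters are placeholders, not part of the original:

import torch
import torch.nn as nn
import torch.optim as optim

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)  # placeholder hyperparameters

# One pass over the training loader.
net.train()
for inputs, targets in trainloader:
    inputs, targets = inputs.to(device), targets.to(device).long()
    optimizer.zero_grad()
    loss = criterion(net(inputs), targets)
    loss.backward()
    optimizer.step()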