Example #1
def full_piano_roll(path, receptive_field):
    # Parse the MIDI file, silencing pretty_midi warnings about malformed files.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        song = pm.PrettyMIDI(midi_file=str(path))
    # One (piano roll, program) pair per instrument, split into melodic and drum tracks.
    piano_rolls = [(inst.get_piano_roll(fs=song.resolution), inst.program)
                   for inst in song.instruments if not inst.is_drum]
    drum_rolls = [(inst.get_piano_roll(fs=song.resolution), inst.program)
                  for inst in song.instruments if inst.is_drum]
    # Pad every roll to the length of the longest track.
    length = np.amax([roll.shape[1] for roll, _ in piano_rolls + drum_rolls])
    # Row layout of the combined roll (129 * 129 + 1 rows in total):
    #   rows [program * 128, (program + 1) * 128)  pitches of melodic program 0..127
    #   rows [128 * 128, 128 * 129)                shared pitches for all drum tracks
    #   row  128 * 129 + program                   "program is present" flag
    #   row  129 * 129 - 1                         "drums are present" flag
    #   row  129 * 129                             "silence" flag (nothing playing)
    data = np.zeros(shape=(129 * 129 + 1, length))
    for roll, instrument in piano_rolls:
        data[instrument * 128:(instrument + 1) * 128] += np.pad(
            roll, [(0, 0), (0, length - roll.shape[1])], 'constant')
        data[128 * 129 + instrument] = 1
    for roll, instrument in drum_rolls:
        data[128 * 128:128 * 129] += np.pad(
            roll, [(0, 0), (0, length - roll.shape[1])], 'constant')
        data[129 * 129 - 1] = 1
    # Crop a random window of MAX_LENGTH frames from songs that are too long.
    if length >= MAX_LENGTH:
        num = np.random.randint(0, length - MAX_LENGTH + 1)
        data = data[:, num:num + MAX_LENGTH]
    # Mark frames in which nothing is playing, then binarise the roll.
    data[129 * 129] += 1 - data.sum(axis=0)
    data = data > 0
    # The target is the input shifted past the receptive field, transposed to time-major.
    answer = np.transpose(data[:, receptive_field + 1:], (1, 0))
    return data.astype(np.float32), answer.astype(np.float32)
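
# A minimal usage sketch of the example above. It assumes the module-level
# imports (warnings, numpy as np, pretty_midi as pm) and a MAX_LENGTH constant,
# which are not shown in the source; 'song.mid' is a placeholder path.
import warnings
import numpy as np
import pretty_midi as pm

MAX_LENGTH = 1024        # assumed value, not taken from the original source
RECEPTIVE_FIELD = 64     # hypothetical receptive field of the downstream model

data, answer = full_piano_roll('song.mid', RECEPTIVE_FIELD)
print(data.shape)    # (129 * 129 + 1, frames), with frames <= MAX_LENGTH
print(answer.shape)  # (frames - RECEPTIVE_FIELD - 1, 129 * 129 + 1)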
Example #2
    def loss_values_stat(self, loss_values):
        """Summary statistics for a batch of loss values.

        :param loss_values: per-sample losses produced by one batch
        :return: a formatted statistics string
        """
        if not loss_values:
            raise ValueError('loss_values must not be empty')

        data = np.array(loss_values, dtype=float)
        sum_ = data.sum()
        mean, std = data.mean(), data.std()
        msg = (f'total_loss={sum_:.3f}, '
               f'mean±std={mean:.3f}±{std:.3f}({max(data):.3f}->{min(data):.3f})')
        # Prefix '*' whenever this batch sets a new minimum total loss.
        if sum_ < self.min_total_loss:
            self.min_total_loss = sum_
            msg = '*' + msg
        return msg
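
# Illustration only: the summary line the method above produces, reproduced
# inline for a made-up batch of losses (no class state involved here).
import numpy as np

losses = [0.91, 0.72, 1.05, 0.64]
data = np.array(losses, dtype=float)
print(f'total_loss={data.sum():.3f}, '
      f'mean±std={data.mean():.3f}±{data.std():.3f}'
      f'({data.max():.3f}->{data.min():.3f})')
# -> total_loss=3.320, mean±std=0.830±0.160(1.050->0.640)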
Example #3
def get_row_features(data):
    # Per-column summary statistics of a 2-D array. kurtosis and skew come from
    # scipy.stats; PCA is a project-specific helper that is expected to return
    # one value per column so it stacks with the other statistics below.
    sm = data.sum(axis=0)
    mx = np.max(data, axis=0)
    mn = np.min(data, axis=0)
    mean = np.mean(data, axis=0)
    std = np.std(data, axis=0)
    kur = kurtosis(data, axis=0)
    ske = skew(data, axis=0)
    pca = PCA(data, dimension=1)
    per20 = np.percentile(data, 20, axis=0)
    per40 = np.percentile(data, 40, axis=0)
    per60 = np.percentile(data, 60, axis=0)
    per80 = np.percentile(data, 80, axis=0)
    # Twelve statistics per column, flattened into a single feature vector.
    out = np.stack((sm, mx, mn, mean, std, kur, ske, pca, per20, per40, per60, per80))
    return out.flatten()
Example #4
def load_data():
    # We'll just make our data on the spot here, but
    # we usually load real data sets from a file

    # Create 10000 random 7-bit inputs
    data = np.random.binomial(1, 0.5, size=(10000, 7))

    # Count the number of 1's in each input
    labels = data.sum(axis=1)

    # Create the binary encoding of the ground truth labels
    # As a bit of practice using Numpy, we're going to do this
    # without using a Python loop.
    labels_binary = np.unpackbits(labels.astype(np.uint8)).reshape((-1, 8))
    labels_binary = labels_binary[:, -3:]

    return (data, labels_binary)
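
# A quick check of the unpackbits trick used above (illustrative only):
import numpy as np

labels = np.array([3, 5, 7], dtype=np.uint8)
bits = np.unpackbits(labels).reshape((-1, 8))  # big-endian 8-bit rows
print(bits[:, -3:])   # the last three bits encode the counts 3, 5, 7:
# [[0 1 1]
#  [1 0 1]
#  [1 1 1]]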
Example #5
def test(model, dtloader, epoch):
    # Written against the pre-0.4 PyTorch API (Variable/volatile, .data[0]).
    model.eval()
    test_loss = 0

    ### Compute loss on test set
    for ind, sample in enumerate(dtloader):
        data, target = sample[0], sample[1]
        if opt.cuda:
            data, target = data.cuda(), target.cuda()

        output = model(Variable(data, volatile=True))
        # Per-pixel argmax of prediction and target, kept as single-channel maps.
        thres = output.data.max(1)[1].unsqueeze(1)
        target_thres = target.max(1)[1].unsqueeze(1)

        if opt.plots and ind == 0:
            # Save input / target / prediction side by side for the first batch.
            temp = torch.cat((data.sum(1).unsqueeze(1), target_thres.float(), thres.float()),
                             1).view(-1, 1, data.size(-2), data.size(-1))
            saveImages(temp, '{:s}/Image{:s}.png'.format(opt.experiment, str(epoch).zfill(3)))

        test_loss += custom_loss(output, target, vol=True).data[0]

    test_loss /= len(dtloader)  # loss function already averages over batch size
    print('Test set: Average loss: {:.4f}'.format(test_loss))

    return test_loss
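
# For reference, a minimal sketch of the same evaluation pattern against the
# current PyTorch API; `criterion` stands in for the example's custom_loss and
# is an assumption, not part of the original code.
import torch

def evaluate(model, loader, criterion, device='cpu'):
    model.eval()
    total = 0.0
    with torch.no_grad():                                # replaces Variable(..., volatile=True)
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            total += criterion(output, target).item()    # replaces .data[0]
    return total / len(loader)                           # average over batches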
Example #6
def online_mean_and_sd(loader, data_map=None):
    """Compute the per-channel mean and standard deviation in an online fashion.

        Var[X] = E[X^2] - E^2[X]

    credit xwkuang5
    @https://discuss.pytorch.org/t/about-normalization-using-pre-trained-vgg16-networks/23560/7

    Args:
      loader: iterable of batches of image tensors shaped (B, C, H, W)
      data_map: optional callable applied to each batch before accumulation
        (Default value = None)

    Returns:
      Tuple of per-channel mean and standard deviation arrays.
    """
    cnt = 0
    fst_moment = np.zeros(3)
    snd_moment = np.zeros(3)

    for data in loader:
        if data_map is not None:
            data = data_map(data)
        # The loader may yield a tuple of tensors; stack them into one array.
        data = np.array([t.numpy() for t in data])
        b, c, h, w = data.shape
        nb_pixels = b * h * w
        # Per-channel sums of x and x^2 over batch, height and width.
        sum_ = data.sum(axis=0).sum(axis=-1).sum(axis=-1)
        sum_of_square = (data ** 2).sum(axis=0).sum(axis=-1).sum(axis=-1)
        # Running update of E[X] and E[X^2].
        fst_moment = (cnt * fst_moment + sum_) / (cnt + nb_pixels)
        snd_moment = (cnt * snd_moment + sum_of_square) / (cnt + nb_pixels)

        cnt += nb_pixels

    return fst_moment, np.sqrt(snd_moment - fst_moment ** 2)
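
# A small self-check of the online computation above against numpy's direct
# mean/std on a toy "loader" of random image batches (names made up for this sketch).
import numpy as np
import torch

batches = [torch.rand(4, 3, 8, 8) for _ in range(5)]    # stand-in for a DataLoader
mean, std = online_mean_and_sd(batches)

pixels = np.concatenate([b.numpy() for b in batches])   # (20, 3, 8, 8)
pixels = pixels.transpose(1, 0, 2, 3).reshape(3, -1)    # per-channel flattening
print(np.allclose(mean, pixels.mean(axis=1)))           # expected: True
print(np.allclose(std, pixels.std(axis=1)))             # expected: True (up to rounding)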
Example #7
        # Graph-level readout: concatenate average and max pooling per graph.
        global_pool3 = torch.cat([
            global_avg_pool(pool3, pool3_graph_indicator),
            global_max_pool(pool3, pool3_graph_indicator)
        ], dim=1)

        # Sum the readouts from the three pooling levels and classify.
        readout = global_pool1 + global_pool2 + global_pool3

        logits = self.mlp(readout)
        return logits
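
# The forward pass above uses graph-pooling helpers that are not shown in this
# snippet. Below is a hypothetical plain-PyTorch sketch of what they could look
# like; the real project may implement them differently (e.g. via torch_scatter).
# x: (num_nodes, feat) node features; graph_indicator: (num_nodes,) graph id per node.
import torch

def global_avg_pool(x, graph_indicator):
    num_graphs = int(graph_indicator.max()) + 1
    out = torch.zeros(num_graphs, x.size(1), device=x.device, dtype=x.dtype)
    out.index_add_(0, graph_indicator, x)                      # per-graph feature sums
    counts = torch.bincount(graph_indicator, minlength=num_graphs).clamp(min=1)
    return out / counts.unsqueeze(1).to(x.dtype)               # sums -> means

def global_max_pool(x, graph_indicator):
    num_graphs = int(graph_indicator.max()) + 1
    return torch.stack([x[graph_indicator == g].max(dim=0).values
                        for g in range(num_graphs)])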


## load data
data, labels = load_data("Cirrhosis")
data = data.transpose()
# Normalise each sample so that its feature values sum to one.
sums = data.sum(axis=1)
data = data.divide(sums, axis=0)
labels, label_set = pd.factorize(labels)
features_raw = list(data.columns.values)
features, features_level, map_values, map_edges = generate_map(
    data, features_raw)

# Prepare the model input data

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
node_labels, edges_list, graph_indicator = prepare_GNN_data(
    map_values, map_edges)
train_index, test_index = split_data(graph_indicator, 0.9)
# Sanity-check the edge-list shapes of the first graph.
print(np.array(edges_list[0]).shape[0])
print(np.array(edges_list[0]).shape)
print(np.ones(np.array(edges_list[0]).reshape(-1, 1).shape[0]).shape)