Example #1
import numpy as np
import matplotlib.pyplot as plt


def visualize_data(data,
                   padsize=1,
                   padval=0,
                   cmap="gray",
                   image_size=(10, 10)):
    data -= data.min()
    data /= data.max()

    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n**2 - data.shape[0]), (0, padsize),
               (0, padsize)) + ((0, 0), ) * (data.ndim - 3)
    data = np.pad(data,
                  padding,
                  mode='constant',
                  constant_values=(padval, padval))

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose(
        (0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) +
                        data.shape[4:])

    plt.figure(figsize=image_size)
    plt.imshow(data, cmap=cmap)
    plt.axis('off')
    plt.show()
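
A minimal usage sketch, reusing the imports above; the random filter tensor is a made-up placeholder, not data from the original project:

# Tile 20 hypothetical 8x8 "filters" into a padded 5x5 grid
# (5 is the next integer whose square covers 20).
filters = np.random.rand(20, 8, 8)
visualize_data(filters, padsize=1, padval=0)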
Example #2
import math
import struct

import numpy as np


def read_htk(inputFileName, framePerSecond=100):
    """
    Read an HTK-format feature file.
    Returns (data, frame_period, base_kind_code, type_code, kind_name).
    """
    kinds = [
        'WAVEFORM', 'LPC', 'LPREFC', 'LPCEPSTRA', 'LPDELCEP', 'IREFC', 'MFCC',
        'FBANK', 'MELSPEC', 'USER', 'DISCRETE', 'PLP', 'ANON', '???'
    ]
    with open(inputFileName, 'rb') as fid:
        nf = struct.unpack(">l", fid.read(4))[0]         # number of frames
        fp = struct.unpack(">l", fid.read(4))[0] * 1e-7  # frame period (stored in 100 ns units)
        by = struct.unpack(">h", fid.read(2))[0]         # bytes per frame
        tc = struct.unpack(">h", fid.read(2))[0]         # type code (kind + qualifier bits)
        tc = tc + 65536 * (tc < 0)
        cc = 'ENDACZK0VT'
        nhb = len(cc)
        ndt = 6
        hb = list(
            int(math.floor(tc * 2**x)) for x in range(-(ndt + nhb), -ndt + 1))
        dt = tc - hb[-1] * 2**ndt  # base data kind with qualifier bits stripped
        if any([dt == x for x in [0, 5, 10]]):
            # WAVEFORM, IREFC and DISCRETE data are stored as 16-bit integers,
            # which this float-only reader does not handle.
            raise ValueError('Unsupported HTK data kind: %s' % kinds[dt])
        data = np.asarray(
            struct.unpack(">" + "f" * int(by / 4) * nf, fid.read(by * nf)))
        d = data.reshape(nf, int(by / 4))
    t = kinds[min(dt, len(kinds) - 1)]
    return (d, fp, dt, tc, t)
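
A hedged usage sketch; "features.mfc" is a placeholder path, and the unpacked names simply mirror the return tuple above:

# Read a (hypothetical) HTK feature file and inspect what came back.
d, frame_period, base_kind, type_code, kind_name = read_htk("features.mfc")
print(d.shape)        # (number of frames, coefficients per frame)
print(frame_period)   # frame period in seconds, e.g. 0.01 for 100 frames/s
print(kind_name)      # e.g. "MFCC"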
Example #3
def visualize_data(data, padsize=1, padval=0, cmap="gray", image_size=(10,10)):
    data -= data.min()
    data /= data.max()

    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])

    plt.figure(figsize=image_size)
    plt.imshow(data, cmap=cmap)
    plt.axis('off')
    plt.show()
Example #4
import matplotlib.pyplot as plt


def saveImagePlt(data, filepath):
    image = data.reshape(28, 28)
    plt.figure(figsize=(28, 28), dpi=1)  # 28 x 28 inches at 1 dpi -> 28 x 28 pixel output
    plt.imshow(image, interpolation='none')
    #    plt.colorbar()
    plt.gca().get_xaxis().set_ticks_position('none')  # hide tick marks
    plt.gca().get_yaxis().set_ticks_position('none')  # ditto for the y-axis
    plt.xticks([])  # remove x-axis tick labels by passing an empty list
    plt.yticks([])  # ditto for the y-axis
    plt.gray()
    plt.savefig(filepath)
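
A small usage sketch with made-up input (a random 784-dimensional vector standing in for an MNIST digit; the output path is a placeholder):

import numpy as np

# A length-784 vector is reshaped to 28x28 and, with figsize=(28, 28) at dpi=1,
# written out as a 28x28-pixel grayscale image.
vec = np.random.rand(784)
saveImagePlt(vec, "digit.png")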
Example #5
import matplotlib.pyplot as plt


def draw_digit(data, row, col, n, _type):
    size = 28
    plt.subplot(row, col, n)
    Z = data.reshape(size, size)
    Z = Z[::-1, :]
    plt.xlim(0, 28)
    plt.ylim(0, 28)
    plt.pcolor(Z)
    plt.title("type=%s"%(_type), size=8)
    plt.gray()
    plt.tick_params(labelbottom="off")
    plt.tick_params(labelleft="off")
Example #6
import matplotlib.pyplot as plt


def draw_digit3(data, n, ans, recog):
    size = 28
    plt.subplot(10, 10, n)
    Z = data.reshape(size, size)  # convert from vector to 28x28 matrix
    Z = Z[::-1, :]  # flip vertical
    plt.xlim(0, 27)
    plt.ylim(0, 27)
    plt.pcolor(Z)
    plt.title("ans=%d, recog=%d" % (ans, recog), size=8)
    plt.gray()
    plt.tick_params(labelbottom="off")
    plt.tick_params(labelleft="off")
Example #7
import matplotlib.pyplot as plt


def draw_digit(data, n):
    size = 28
    plt.subplot(10,10,n)
#    X, Y = np.meshgrid(range(size),range(size))
    Z = data.reshape(size,size)   # convert from vector to 28x28 matrix
    Z = Z[::-1,:]             # flip vertical
    plt.xlim(0,27)
    plt.ylim(0,27)
#    plt.pcolor(X, Y, Z)
    plt.pcolor(Z)
    plt.gray()
    plt.tick_params(labelbottom="off")
    plt.tick_params(labelleft="off")
Example #8
import matplotlib.pyplot as plt
from matplotlib import gridspec


def draw_digit_ae(data, n, row, col):
    size = 28
    G = gridspec.GridSpec(row, col)
    sp = plt.subplot(G[n//10, n%10])

    Z = data.reshape(size,size)   # convert from vector to 28x28 matrix
    Z = Z[::-1,:]                 # flip vertical
    sp.pcolor(Z)

    sp.tick_params(labelbottom="off")
    sp.tick_params(labelleft="off")
    sp.axis('scaled')
    sp.axis([0, 28, 0, 28])
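
A usage sketch assuming a 10x10 grid (the n // 10, n % 10 indexing above hard-codes ten columns); the random vectors are placeholders for real digit data:

import numpy as np

# Fill a 10x10 grid of subplots with 100 random 784-dimensional "digits".
plt.figure(figsize=(10, 10))
for i in range(100):
    draw_digit_ae(np.random.rand(784), i, 10, 10)
plt.show()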
Example #9
import numpy as np
import matplotlib.pyplot as plt


def read_binary(filename, domain, discretization):

    if domain.rank == 0:

        # read the matrix from "<filename>.bin"
        data = np.fromfile(filename + ".bin",
                           dtype=np.double,
                           count=discretization.nx * discretization.ny,
                           sep='')
        data = data.reshape(discretization.ny, discretization.nx)

        print("Create " + filename + ".png from matrix " + str(data.shape) +
              " in current directory !")
        graph = plt.contourf(data, 12, alpha=.75, cmap='jet')
        plt.contour(data, 12, colors='black', linewidths=0.1)
        cbar = plt.colorbar(graph)
        cbar.set_label('s value')
        plt.ylabel('Grid points on Y-axis')
        plt.xlabel('Grid points on X-axis')
        plt.title('Contour plot of the solution')
        plt.savefig("./" + filename + ".png")
Example #10
import matplotlib.pyplot as plt
from matplotlib import gridspec


def draw_digit_ae(data, n, row, col):
    size = 28
    G = gridspec.GridSpec(row, col)
    sp = plt.subplot(G[n//10, n%10])

    Z = data.reshape(size,size)   # convert from vector to 28x28 matrix
    Z = Z[::-1,:]                 # flip vertical
    sp.pcolor(Z)

    sp.tick_params(
        axis='x',           # changes apply to the x-axis
        which='both',       # both major and minor ticks are affected
        bottom=False,       # ticks along the bottom edge are off
        top=False,          # ticks along the top edge are off
        labelbottom=False)  # labels along the bottom edge are off
    sp.tick_params(
        axis='y',           # changes apply to the y-axis
        which='both',       # both major and minor ticks are affected
        left=False,         # ticks along the left edge are off
        right=False,        # ticks along the right edge are off
        labelleft=False)    # labels along the left edge are off
    sp.axis('scaled')
    sp.axis([0, 28, 0, 28])
Example #11
import matplotlib.pyplot as plt
from matplotlib import gridspec


def draw_digit_ae(data, n, row, col):
    size = 28
    G = gridspec.GridSpec(row, col)
    sp = plt.subplot(G[n // 10, n % 10])

    Z = data.reshape(size, size)  # convert from vector to 28x28 matrix
    Z = Z[::-1, :]  # flip vertical
    sp.pcolor(Z)

    sp.tick_params(
        axis='x',  # changes apply to the x-axis
        which='both',  # both major and minor ticks are affected
        bottom=False,  # ticks along the bottom edge are off
        top=False,  # ticks along the top edge are off
        labelbottom=False)  # labels along the bottom edge are off
    sp.tick_params(
        axis='y',  # changes apply to the y-axis
        which='both',  # both major and minor ticks are affected
        left=False,  # ticks along the left edge are off
        right=False,  # ticks along the right edge are off
        labelleft=False)  # labels along the left edge are off
    sp.axis('scaled')
    sp.axis([0, 28, 0, 28])
Example #12
def batchify(data, batch_size):
    """Reshape data into (num_example, batch_size)"""
    nbatch = data.shape[0] // batch_size
    data = data[:nbatch * batch_size]
    data = data.reshape((batch_size, nbatch)).T
    return data
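
A worked example of the reshape, using a plain NumPy array as a stand-in for whatever array type the project actually feeds in:

import numpy as np

tokens = np.arange(10)
batches = batchify(tokens, 3)  # 10 // 3 = 3 batches; the trailing token is dropped
print(batches.shape)           # (3, 3) -> (num_batches, batch_size)
print(batches)                 # [[0 3 6]
                               #  [1 4 7]
                               #  [2 5 8]]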
Example #14
import copy
import random

import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable

import cfgan  # project-local module; select_negative_items and computeTopN
              # are project-local helpers as well


def main(userCount, itemCount, testSet, trainVector, trainMaskVector,
         UseInfo_pre, topN, epochCount, pro_ZR, pro_PM, alpha):

    info_shape = UseInfo_pre.shape[1]
    UseInfo_pre = UseInfo_pre.values
    UseInfo_pre = np.insert(UseInfo_pre, 0, [0, 0, 0, 0, 0], axis=0)
    UseInfo_pre = torch.tensor(UseInfo_pre.astype(np.float32))
    result_precision = np.zeros((1, 2))

    # Build the generator and discriminator
    G = cfgan.generator(itemCount, info_shape)
    D = cfgan.discriminator(itemCount, info_shape)
    regularization = nn.MSELoss()
    d_optimizer = torch.optim.Adam(D.parameters(), lr=0.0001)
    g_optimizer = torch.optim.Adam(G.parameters(), lr=0.0001)
    G_step = 5
    D_step = 2
    batchSize_G = 32
    batchSize_D = 32

    for epoch in range(epochCount):

        # ---------------------
        #  Train Generator
        # ---------------------

        for step in range(G_step):

            # Select a random batch of purchased vector
            leftIndex = random.randint(1, userCount - batchSize_G - 1)
            realData = Variable(
                copy.deepcopy(trainVector[leftIndex:leftIndex + batchSize_G]))
            eu = Variable(
                copy.deepcopy(trainVector[leftIndex:leftIndex + batchSize_G]))
            useInfo_batch = Variable(
                copy.deepcopy(UseInfo_pre[leftIndex:leftIndex + batchSize_G]))

            # Select a random batch of negative items for every user
            n_items_pm, n_items_zr = select_negative_items(
                realData, pro_PM, pro_ZR)
            ku_zp = Variable(torch.tensor(n_items_pm + n_items_zr))
            realData_zp = Variable(torch.ones_like(realData)) * eu + Variable(
                torch.zeros_like(realData)) * ku_zp

            # Generate a batch of new purchased vector
            fakeData = G(realData, useInfo_batch)
            fakeData_ZP = fakeData * (eu + ku_zp)
            fakeData_result = D(fakeData_ZP, useInfo_batch)

            # Train the generator: push D(fake) toward 1, plus the
            # ZR reconstruction regularization term.
            g_loss = torch.mean(
                torch.log(1. - fakeData_result + 10e-5)
            ) + alpha * regularization(fakeData_ZP, realData_zp)
            g_optimizer.zero_grad()
            g_loss.backward(retain_graph=True)
            g_optimizer.step()

        # ---------------------
        #  Train Discriminator
        # ---------------------
        for step in range(D_step):
            # Select a random batch of purchased vector
            leftIndex = random.randint(1, userCount - batchSize_D - 1)
            realData = Variable(
                copy.deepcopy(trainVector[leftIndex:leftIndex + batchSize_D]))
            eu = Variable(
                copy.deepcopy(trainVector[leftIndex:leftIndex + batchSize_D]))
            useInfo_batch = Variable(
                copy.deepcopy(UseInfo_pre[leftIndex:leftIndex + batchSize_D]))

            # Select a random batch of negative items for every user
            n_items_pm, _ = select_negative_items(realData, pro_PM, pro_ZR)
            ku = Variable(torch.tensor(n_items_pm))

            # Generate a batch of new purchased vector
            fakeData = G(realData, useInfo_batch)
            fakeData_ZP = fakeData * (eu + ku)

            # Train the discriminator
            fakeData_result = D(fakeData_ZP, useInfo_batch)
            realData_result = D(realData, useInfo_batch)
            d_loss = -torch.mean(
                torch.log(realData_result + 10e-5) +
                torch.log(1. - fakeData_result + 10e-5))
            d_optimizer.zero_grad()
            d_loss.backward(retain_graph=True)
            d_optimizer.step()

        if (epoch % 1 == 0):
            n_user = len(testSet)

            index = 0
            precisions = 0
            for testUser in testSet.keys():

                data = Variable(copy.deepcopy(trainVector[testUser]))
                useInfo_index = Variable(
                    copy.deepcopy(
                        torch.tensor(np.expand_dims(UseInfo_pre[index],
                                                    axis=0))))
                #  Exclude the purchased vector that have occurred in the training set
                result = G(data.reshape(1, 1683), useInfo_index) + Variable(
                    copy.deepcopy(trainMaskVector[index]))
                result = result.reshape(1683)

                precision = computeTopN(testSet[testUser], result, topN)
                precisions += precision
                index += 1

            precisions = precisions / n_user
            result_precision = np.concatenate(
                (result_precision, np.array([[epoch, precisions]])), axis=0)

            print(
                'Epoch[{}/{}],d_loss:{:.6f},g_loss:{:.6f},precision:{}'.format(
                    epoch, epochCount, d_loss.item(), g_loss.item(),
                    precisions))

    return result_precision
Example #15
def batchify(data, batch_size):
    """bachify data shape to (num_batches, batch_size)"""
    num_batches = data.shape[0] // batch_size
    data = data[:num_batches * batch_size]
    data = data.reshape((batch_size, num_batches)).T
    return data
Example #16
File: main.py Project: kitsing/GroC
def own_batchify(data):
    return data.reshape((-1, 800))
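
A quick sketch of the shape contract (the input length must be a multiple of 800 for reshape((-1, 800)) to succeed; the array below is a placeholder):

import numpy as np

seq = np.arange(1600)
print(own_batchify(seq).shape)  # (2, 800)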