Ejemplo n.º 1
0
def loadMnist():
    """Load the MNIST dataset from 'mnist.pkl.gz' and wrap each split
    via MakeData.shared.

    Returns:
        tuple: (trainingData, validationData, testData), each the result
        of MakeData.shared on the corresponding (inputs, labels) split.
    """
    # `with` guarantees the gzip handle is closed even if unpickling
    # raises — the original open()/close() pair leaked on error.
    with gzip.open('mnist.pkl.gz', 'rb') as f:
        # NOTE(review): cPickle implies Python 2; on Python 3 this would be
        # pickle.load(f, encoding='latin1') for the standard MNIST pickle.
        train_set, valid_set, test_set = cPickle.load(f)
    trainingData = MakeData.shared(train_set)
    validationData = MakeData.shared(valid_set)
    testData = MakeData.shared(test_set)
    return trainingData, validationData, testData
Ejemplo n.º 2
0
def experiment_0():
    """Fit a 10-state Gaussian HMM on data from MakeData.make1, then check
    whether regenerated data (MakeData.make1_half) produces the same
    de-duplicated state sequence under the fitted model."""
    datas = MakeData.make1()

    for _ in range(10):
        # datas.extend(makeData(testfunc_circle, 100, 0, 2*np.pi, 10))
        # datas.extend(makeData(testfunc_cubic, 100, 0, 2*np.pi, 0.1))
        print(datas)
        # datas.append(np.array(data))
        # Transpose so row 0 / row 1 become the x / y series for plotting.
        invdata = np.array(datas).T
        print(invdata)

        plt.plot(invdata[0], invdata[1])
    plt.show()

    model = hmm.GaussianHMM(n_components=10, covariance_type="full")

    model.fit(datas)
    '''
    # 学習結果を表示
    print("startprob_")
    print(model.startprob_)
    print("means_")
    print(model.means_)
    print("covars_")
    print(model.covars_)
    print("transmat_")
    print(model.transmat_)    
    '''

    # Predict, then collapse consecutive identical states so that only the
    # state-transition sequence remains.
    pre = model.predict(datas)
    result = []
    for p in pre:
        if len(result) == 0 or p != result[-1]:
            result.append(p)
    print(result)

    # Regenerate the data and compare the resulting state-transition
    # sequence against the one from the original data.
    for _ in range(1000):
        datas = MakeData.make1_half()
        pre = model.predict(datas)
        result_2 = []
        for p in pre:
            if len(result_2) == 0 or p != result_2[-1]:
                result_2.append(p)
        # print(result)
        if result != result_2:
            print("Different")
            print(result_2)
            break
Ejemplo n.º 3
0
def loadCifar100():
    """Load CIFAR-100, flatten each image into a 1-D feature vector, and
    return shared training (first 40k), validation (next 10k) and test
    splits via MakeData.shared."""

    def _flatten(images, labels):
        # Collapse (N, C, H, W) images to (N, C*H*W) and labels to (N,).
        n = images.shape[0]
        flat = images.reshape((n, images.shape[1] * images.shape[2] * images.shape[3]))
        return flat, labels.reshape((n,))

    train_set, test_set = cifar100.load_data()

    X, Y = _flatten(*train_set)
    trainingData = MakeData.shared((X[0:40000, :], Y[0:40000]))
    validationData = MakeData.shared((X[40000:50000, :], Y[40000:50000]))

    X, Y = _flatten(*test_set)
    testData = MakeData.shared((X, Y))

    return trainingData, validationData, testData
Ejemplo n.º 4
0
def data_augmentation():
    """Augment the face-image dataset and build Keras data generators.

    Runs initface.sh, applies a sepia pass and shape augmentation to the
    training images, then builds a shuffled training generator and an
    unshuffled validation generator over `root_dir`.

    Returns:
        tuple: (training generator, validation generator, first validation
        batch inputs valX, first validation batch labels valy).
    """
    # Raise brightness / reset the image directory before augmenting.
    os.system('bash ./initface.sh')
    DA.sepia('sp')
    DA.Shape(10, -1, 'spsp', '0')  # Inflate (augment) the training image data.
    mizumashi_data = ImageDataGenerator()
    mizumashi_generator = mizumashi_data.flow_from_directory(
        directory=root_dir,
        target_size=(img_size, img_size),
        batch_size=batch_size,
        shuffle=True)
    # Validation generator: shuffle=False keeps valX/valy in a stable order.
    # (The original bound this twice, as val_gen and val_generator; one
    # name suffices.)
    val_datagen = ImageDataGenerator()
    val_generator = val_datagen.flow_from_directory(
        directory=root_dir,
        target_size=(img_size, img_size),
        batch_size=batch_size,
        shuffle=False)
    valX, valy = val_generator.next()
    return (mizumashi_generator, val_generator, valX, valy)
Ejemplo n.º 5
0
def shrink(train, cv, test):
    """Drop rows that are all-zero across the three matrices combined.

    A row is kept iff the sum of its entries over train + cv + test is
    positive (assumes non-negative data — TODO confirm with the callers).

    Args:
        train, cv, test: 2-D numpy arrays of identical shape.

    Returns:
        tuple: (train, cv, test) restricted to the kept rows, in order.
    """
    nums = np.sum(train + cv + test, axis=1)
    # flatnonzero replaces the original argwhere + map(lambda x: x[0])
    # dance; the unused `row` local is gone as well.
    locs = np.flatnonzero(nums > 0)
    return train[locs], cv[locs], test[locs]


def transpose(train, cv, test):
    """Return the transposes of the three matrices, in the same order."""
    return tuple(np.transpose(m) for m in (train, cv, test))


# Load the data matrices, then alternately drop all-zero rows and all-zero
# columns: shrink() filters rows only, so transpose between the two passes
# and transpose back at the end to restore the original orientation.
maker = MakeData.ScipyMatMaker()
train = maker.ReadMat(TRAIN_FILE)
cv = maker.ReadMat(CV_FILE)
test = maker.ReadMat(TEST_FILE)
print(train.shape)
train, cv, test = shrink(train, cv, test)  # drop empty rows
train, cv, test = transpose(train, cv, test)
train, cv, test = shrink(train, cv, test)  # drop empty columns (now rows)
train, cv, test = transpose(train, cv, test)  # back to original orientation
# with open(TRAIN_FILE, 'wb') as fs:
# 	pickle.dump(train, fs)
# with open(TEST_FILE, 'wb') as fs:
# 	pickle.dump(test, fs)
# with open(CV_FILE, 'wb') as fs:
# 	pickle.dump(cv, fs)
print(train.shape)
Ejemplo n.º 6
0
result_images = "./result/out.png"

# Augmentation helpers (brightness, sepia, shape inflation).
import MakeData as DA

# Reset the face-image working directory before augmenting.
os.system('./initface.sh ')


def _augment(mode):
    """Run the augmentation step selected by `mode` ('0'..'4').

    '0' applies sepia; '1'/'2'/'3' gray-extract a single color channel and
    inflate it; '4' inflates the sepia output. Unknown modes are a no-op,
    matching the original if/elif chain.
    """
    if mode == '0':
        DA.sepia('sp')
    elif mode in ('1', '2', '3'):
        channel = {'1': 'red', '2': 'green', '3': 'blue'}[mode]
        DA.CE_gray(channel, channel)
        DA.Shape(10, -1, channel, '0')
    elif mode == '4':
        DA.Shape(10, -1, 'sp', '0')


_augment(args[1])
Ejemplo n.º 7
0
    heapSizeLower = int(sys.argv[1])
    heapSizeUpper = int(sys.argv[2])
    heapAmountLower = int(sys.argv[3])
    heapAmountUpper = int(sys.argv[4])
    f = open("mlpstatistics.txt", "w")
    filename = "CorrectMoves.txt"
    player = PP.PerfectPlayer()
    #The loop that defines the start-states from which the programs will be run.
    for heapSize in range(heapSizeLower, heapSizeUpper + 1):
        for heapAmount in range(heapAmountLower, heapAmountUpper + 1):
            """The Supervised learning program is run 10 times for each start-state, and plays against the perfect player each time,
			to get a winrate for statistics. The time used is also written to file as statistics. The data used is the same for a given state,
			but it is sorted differently for each time."""
            state = [heapSize] * heapAmount
            print(state)
            md = MD.MakeData()
            md.setup(state)
            timeAvg = 0
            percent = 0
            for i in range(10):
                #sorting and separating the data into the needed categories.
                data = np.loadtxt(filename)

                stateLen = len(data[0])

                boards = data[::2]
                moves = data[1::2]

                order = list(range(np.shape(boards)[0]))
                np.random.shuffle(order)
                boards = boards[order, :]