Example 1
import numpy as np
from keras.optimizers import Adam

# loaddata, loadmodel, getnoise, creategans and showim are
# project-local helpers that are not shown in this snippet.


def train():
    dataset = "cats"
    images = loaddata(dataset)
    g, d = loadmodel(dataset)
    LR = 0.0002  # initial learning rate
    B1 = 0.5  # momentum term
    opt = Adam(lr=LR, beta_1=B1)
    g.summary()
    d.summary()

    # Compile d so it can be trained on its own, then freeze it before
    # building the combined model: training d_on_g updates only the generator.
    d.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=opt)
    g.compile(loss='binary_crossentropy', optimizer=opt)
    d.trainable = False
    d_on_g = creategans(d, g)
    d_on_g.compile(loss='binary_crossentropy', optimizer=opt)
    i1, i2, i3, i4 = images.shape  # number of images, height, width, channels
    print(i1)
    epochs = 40
    batch_size = 128 // 2  # 64 real and 64 fake images per discriminator step
    k = 0
    noise_test = getnoise(batch_size * 2)  # fixed noise, so sample grids are comparable across epochs
    filename = "imscats/" + str(dataset) + "_n_epochs_" + str(epochs) + ".h5"
    for i in range(epochs):
        g.save(filename)  # checkpoint the generator once per epoch (same file, overwritten)
        for j in range(i1 // batch_size):
            noise = getnoise(batch_size * 2)
            noise_images = g.predict(noise)

            # Train d on a batch of real images (label 1) and a batch of fakes (label 0).
            d.train_on_batch(images[j * batch_size:(j + 1) * batch_size],
                             np.ones([batch_size, 1]))
            ld = d.train_on_batch(noise_images[0:batch_size],
                                  np.zeros([batch_size, 1]))

            if j % 10 == 0:
                print("Epoch: ", i, " D Loss: ", ld)
            # Train the generator through the frozen discriminator,
            # asking it to classify the fakes as real.
            lg = d_on_g.train_on_batch(noise, np.ones([batch_size * 2, 1]))
            if j % 10 == 0:
                print("Epoch: ", i, " G Loss: ", lg)
            if j % 100 == 0:
                showim(g, k, noise_test, [i1, i2, i3, i4])
                k = k + 1
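
The snippet relies on a getnoise helper that is not shown. A minimal sketch, assuming a uniform latent prior and a 100-dimensional noise vector (both assumptions; the generator's input shape is not given here):

import numpy as np

def getnoise(n, dim=100):
    # dim is a guess and must match the generator's input layer.
    return np.random.uniform(-1, 1, size=(n, dim))
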
Example 2

def train():
    """
    Loads datasets {'mnist', 'flowers', 'flowers128',
     'cifar10', 'cats'} and their corresponding model
     and trains the GANs using 'epochs' epochs.
    """
    dataset="mnist"
    images=loaddata(dataset)
    g,d=loadmodel(dataset)
    opt = Adam(lr=0.0002,beta_1=0.5)
    g.summary()
    d.summary()
    d.trainable=True
    d.compile(loss='binary_crossentropy', optimizer=opt)
    g.compile(loss='binary_crossentropy', optimizer=opt)
    d.trainable=False
    gansmodel=creategans(d,g)
    gansmodel.compile(loss='binary_crossentropy', optimizer=opt)

    i1, i2, i3, i4 = images.shape
    epochs = 40
    batch_size = 128 // 2
    k = 0
    filename = "ims8/" + str(dataset) + "_n_epochs_" + str(epochs) + ".h5"
    for i in range(epochs):
        g.save(filename)
        for j in range(i1 // batch_size):
            noise = getnoise(batch_size * 2)
            noise_images = g.predict(noise)

            d.train_on_batch(images[j * batch_size:(j + 1) * batch_size],
                             np.ones([batch_size, 1]))
            ld = d.train_on_batch(noise_images[0:batch_size],
                                  np.zeros([batch_size, 1]))

            if j % 10 == 0:
                print("Epoch: ", i + 1, " D Loss: ", ld)

            lg = gansmodel.train_on_batch(noise, np.ones([batch_size * 2, 1]))

            if j % 10 == 0:
                print("Epoch: ", i + 1, " G Loss: ", lg)
            if j % 100 == 0:
                showim(g, k, [i1, i2, i3, i4])
                k = k + 1
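
creategans is also not shown. Given that d.trainable is set to False immediately before the call, it is presumably the usual stacked GAN model; a minimal sketch under that assumption:

from keras.models import Sequential

def creategans(d, g):
    # Stack the generator and the frozen discriminator; training the
    # combined model then updates only the generator's weights.
    model = Sequential()
    model.add(g)
    model.add(d)
    return model
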
Example 3
import sys

import matplotlib.pyplot
import numpy

import data           # project-local module (not shown)
import neuralnetwork  # project-local module (not shown)


def lambda_check(layers, X_train, Y_train, X_cv, Y_cv):
    # 0 plus a doubling grid from 0.01 to 10.24 (see the one-liner after this function).
    lambdas = [
        0, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.28, 2.56, 5.12, 10.24
    ]
    costs = []
    for lambda_regularization in lambdas:
        Theta = neuralnetwork.randomtheta(layers, X_train.shape[1])
        output = neuralnetwork.train(X_train,
                                     Y_train,
                                     Theta,
                                     lambda_regularization,
                                     maxiterations=233)

        # CV cost is presumably computed with the regularization term switched
        # off (last argument 0), so the costs are directly comparable.
        cost = neuralnetwork.calculatecost(X_cv, Y_cv, output[0], Theta, 0)
        costs.append(cost)

    print(costs)
    print()
    print("Min Cost Is:")
    print(lambdas[numpy.argmin(costs)])
    matplotlib.pyplot.plot(lambdas, costs)
    matplotlib.pyplot.show()
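
As an aside, the grid in lambda_check is 0 followed by a doubling sequence starting at 0.01, so it can be generated instead of typed out; this one-liner produces exactly the same values:

lambdas = [0] + [0.01 * 2**i for i in range(11)]
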


if __name__ == '__main__':
    stuff = data.loaddata(sys.argv[1])
    # learningcurves([63, 63, 26], stuff.X_train, stuff.Y_train, stuff.X_cv, stuff.Y_cv, 0.16)
    lambda_check([63, 63, 26], stuff.X_train, stuff.Y_train, stuff.X_cv,
                 stuff.Y_cv)
    # plot_fig(trainX, trainY, testXsave, testY, "my_polynomial_reg")  # stray fragment: none of these names are defined in this snippet

Example 4

import numpy as np

class PolynomialRegression:
    def __init__(self, degree=1):
        self.degree = degree
        self.coef_ = None
        self.intercept_ = None

    def _create_phi(self, data):
        # Design matrix with columns [x**0, x**1, ..., x**degree].
        phi = lambda x: [x**i for i in range(self.degree + 1)]
        return np.array([phi(i) for i in data]).reshape(len(data), -1)

    def fit(self, data, target):
        phi_arr = self._create_phi(data)
        # Normal equation: w = (Phi^T Phi)^{-1} Phi^T y
        w = np.dot(np.linalg.inv(phi_arr.T @ phi_arr), (phi_arr.T @ target))
        # Prepend a 0 so coef_ lines up with the design-matrix columns;
        # the bias term itself is kept in intercept_.
        self.coef_ = np.insert(w[1:], 0, 0)
        self.intercept_ = w[0]

    def predict(self, data):
        # Map raw inputs through the same basis expansion used in fit;
        # the zeroed bias slot in coef_ is compensated by intercept_.
        phi_arr = self._create_phi(data)
        return (phi_arr @ self.coef_) + self.intercept_


if __name__ == '__main__':
    trainX, trainY = loaddata("polynomial_reg.csv")
    # sklearn
    sk_polynomial_reg(trainX, trainY)
    # original
    my_polynomial_reg(trainX, trainY)
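
A quick self-contained check of PolynomialRegression on synthetic data (the data, seed and degree here are illustrative, not from the original script):

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(-1, 1, 50)
y = 1.0 + 2.0 * x - 3.0 * x**2 + rng.normal(0, 0.1, size=x.shape)

model = PolynomialRegression(degree=2)
model.fit(x, y)
print(model.intercept_)   # should be close to 1.0
print(model.coef_)        # roughly [0, 2.0, -3.0]
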
Example 5
from collections import Counter

import numpy as np


class KNN:
    # NOTE: the original snippet begins mid-class; the class name and
    # __init__ below are assumptions added so the example runs.
    def __init__(self, n_neighbors=3):
        self.n_neighbors = n_neighbors

    def _calc_distance(self, p0, p1):
        # Squared Euclidean distance (the square root is not needed for ranking).
        return np.sum((p0 - p1)**2)

    def fit(self, data, target):
        # Nothing can be precomputed for k-NN; just memorize the training set.
        self._train = data
        self._target = target

    def predict(self, test):
        labels = []
        for data in test:
            # Distance from this test point to every training point.
            distances = np.array(
                [self._calc_distance(p, data) for p in self._train])
            # Take the n_neighbors nearest points...
            indexes = distances.argsort()[:self.n_neighbors]
            # ...and decide the label by majority vote among them.
            label = Counter(self._target[indexes]).most_common(1)[0][0]
            labels.append(label)

        return labels


if __name__ == '__main__':
    trainX, trainY = loaddata()
    # sklearn
    calcref(trainX, trainY)
    # original
    my_knn(trainX, trainY)
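
A minimal smoke test for the classifier (synthetic points; the KNN class name comes from the reconstructed header above, which is itself an assumption):

import numpy as np

X = np.array([[0, 0], [0, 1], [1, 0], [5, 5], [5, 6], [6, 5]], dtype=float)
y = np.array([0, 0, 0, 1, 1, 1])

knn = KNN(n_neighbors=3)
knn.fit(X, y)
print(knn.predict(np.array([[0.5, 0.5], [5.5, 5.5]])))  # expected: [0, 1]
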
Example 6
from pprint import pprint

from cfgs import cfg, updcfg
from data import loaddata
from models import RetinaNet, MiniRetina
from utils import parsargs, Train

args = parsargs()
cfg = updcfg(args, cfg)
print('The configs of the model:')
pprint(cfg)
if cfg.detname == 'retinanet':
    print('Loading the retinanet model ...')
    model = RetinaNet(cfg)
elif cfg.detname == 'miretina':
    print('Loading the miniretina model ...')
    model = MiniRetina(cfg)
else:
    raise ValueError('Unknown detector name: {}'.format(cfg.detname))
data = loaddata(cfg)
if args.train:
    train = Train(cfg)
    train(model, data)
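
Assuming parsargs wraps argparse and exposes a --train flag (the flag name is implied by args.train; the script filename below is a placeholder), a training run would be launched as:

python main.py --train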