Code Example #1
# Assumed imports for this excerpt; the `Policy` base class and the
# `@validation` decorator come from the surrounding project and are not shown.
import numpy as np
from keras import optimizers, regularizers
from keras.models import Sequential
from keras.layers import Activation, Dense


class TerminationPolicy(Policy):
    """
    Termination is a binary decision, so we parametrise π_term as a single
    feedforward layer, with the hidden state as input, followed by a sigmoid
    function, to represent the probability of termination.
    """
    def __init__(self, hidden_state_size, entropy_reg=0.05):
        # single feedforward layer with sigmoid function
        self.model = Sequential([
            Dense(
                1,
                input_shape=(hidden_state_size,),
                # TODO: compare 'random_normal' against 'random_uniform'
                kernel_initializer='random_normal',
                activity_regularizer=regularizers.l1(entropy_reg)),
            Activation('sigmoid')
        ])

        # Accuracy is not a meaningful metric for this head: it outputs a
        # termination probability, so binary cross-entropy is the natural
        # loss and no accuracy metric is compiled in.
        optimizer = optimizers.Adam()  # the default lr (0.001) is fine

        self.model.compile(
            optimizer=optimizer,
            # binary cross-entropy matches the binary termination decision
            loss='binary_crossentropy')
        # Previously tried alternative:
        # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True, clipvalue=0.5)
        # self.model.compile(loss='mean_squared_error', optimizer=sgd,
        #                    metrics=['accuracy'])

    @validation
    def output_is_valid(self, output):
        is_valid = isinstance(output, (bool, np.bool_))  # np.bool was removed from NumPy
        msg = '{} output invalid. Expected: {} received: {}'.format(
            self.__class__.__name__, 'boolean', type(output))
        return is_valid, msg

    def forward(self, hidden_state, test=False):
        self.input_is_valid(hidden_state)

        confidence = self.model.predict(hidden_state)[0][0]
        if test:
            out = bool(confidence >= 0.5)  # deterministic: terminate iff p >= 0.5
        else:
            # stochastic: sample termination with probability `confidence`
            out = np.random.choice([True, False],
                                   p=[confidence, 1 - confidence])
        self.output_is_valid(out)
        return out

    def train(self, x, y, sample_weight):
        out = self.model.train_on_batch(x, y, sample_weight=sample_weight)
        return out
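
A minimal usage sketch for the class above, assuming the imports shown and
that the project-level `Policy.input_is_valid` accepts a
(1, hidden_state_size) array (both assumptions, not the original project's
code):

# Hypothetical usage of TerminationPolicy.
import numpy as np

policy = TerminationPolicy(hidden_state_size=128)
hidden_state = np.random.randn(1, 128).astype(np.float32)  # batch of one hidden state

terminate = policy.forward(hidden_state, test=True)  # deterministic decision
sampled = policy.forward(hidden_state)               # Bernoulli sample
print(terminate, sampled)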
Code Example #2
def train():  # training
    # only the training images are needed; the labels are unused for the GAN
    (x_train, y_train), (_, _) = mnist.load_data()
    # scale pixels from [0, 255] to [-1, 1]
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)

    g = generator()  # build the networks
    d = discriminator()

    optimize = Adam(lr=learning_rate, beta_1=0.5)  # choose the optimization algorithm
    d.trainable = True  # D is trainable for its own updates
    d.compile(loss='binary_crossentropy',  # loss function (measures performance)
              metrics=['accuracy'],  # hit rate
              optimizer=optimize)

    d.trainable = False  # D will not be trained inside the combined model
    dcgan = Sequential([g, d])  # build the GAN; only G is trained through it
    dcgan.compile(loss='binary_crossentropy',
                  metrics=['accuracy'],
                  optimizer=optimize)

    num_batches = x_train.shape[0] // batch_size  # number of images / images per batch
    gen_img = np.array([np.random.uniform(-1, 1, 100) for _ in range(49)])  # fixed noise for a 7x7 preview grid
    y_d_true = [1] * batch_size  # label 1 for real images
    y_d_gen = [0] * batch_size  # label 0 for generated images
    y_g = [1] * batch_size  # labels for training G

    for epoch in range(num_epoch):

        for i in range(num_batches):  # train over the batches

            x_d_batch = x_train[i*batch_size:(i+1)*batch_size]  # one batch (batch_size real images)
            x_g = np.array([np.random.normal(0, 0.5, 100) for _ in range(batch_size)])  # noise input for G
            x_d_gen = g.predict(x_g)  # generate batch_size images for training D

            # train D on the real and the generated batch; average the two losses
            d_loss_real = d.train_on_batch(x_d_batch, y_d_true)
            d_loss_fake = d.train_on_batch(x_d_gen, y_d_gen)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            g_loss = dcgan.train_on_batch(x_g, y_g)  # train G through dcgan (noise as input)
            show_progress(epoch, i, g_loss[0], d_loss[0], g_loss[1], d_loss[1])  # report via the helper function

        image = combine_images(g.predict(gen_img))  # tile the preview images into a single image
        image = image * 127.5 + 127.5  # rescale values back to [0, 255]
        Image.fromarray(image.astype(np.uint8)).save(image_path + "%03d.png" % (epoch))  # save the image
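
The train() functions in these examples call show_progress and
combine_images without defining them. A plausible sketch of such helpers,
written here as an assumption rather than the original projects' code:

import math
import numpy as np

def show_progress(epoch, batch, g_loss, d_loss, g_acc, d_acc):
    # Hypothetical helper: one-line progress report per batch.
    print('epoch: %d, batch: %d, g_loss: %f, d_loss: %f, g_acc: %f, d_acc: %f'
          % (epoch, batch, g_loss, d_loss, g_acc, d_acc))

def combine_images(generated_images):
    # Hypothetical helper: tile a batch of (N, H, W, 1) images into one
    # roughly square grid, e.g. 49 images -> a 7x7 grid.
    num = generated_images.shape[0]
    cols = int(math.sqrt(num))
    rows = int(math.ceil(num / cols))
    h, w = generated_images.shape[1:3]
    grid = np.zeros((rows * h, cols * w), dtype=generated_images.dtype)
    for index, img in enumerate(generated_images):
        i, j = divmod(index, cols)
        grid[i * h:(i + 1) * h, j * w:(j + 1) * w] = img[:, :, 0]
    return grid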
Code Example #3
def train():
    (x_train, y_train), (_, _) = mnist.load_data()
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5  # scale pixels to [-1, 1]
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)

    g = generator()
    d = discriminator()

    optimize = Adam(lr=learning_rate, beta_1=0.5)
    d.trainable = True
    d.compile(loss='binary_crossentropy',
              metrics=['accuracy'],
              optimizer=optimize)
    d.trainable = False
    # d.trainable is False here, so only the generator is trained through
    # the combined model
    dcgan = Sequential([g, d])
    dcgan.compile(loss='binary_crossentropy',
                  metrics=['accuracy'],
                  optimizer=optimize)

    num_batches = x_train.shape[0] // batch_size
    gen_img = np.array([np.random.uniform(-1, 1, 100) for _ in range(49)])
    y_d_true = [1] * batch_size
    y_d_gen = [0] * batch_size
    y_g = [1] * batch_size

    for epoch in range(num_epoch):
        for i in range(num_batches):
            x_d_batch = x_train[i*batch_size:(i+1)*batch_size]
            # create noise input for the generator
            x_g = np.array([np.random.normal(0, 0.5, 100) for _ in range(batch_size)])
            x_d_gen = g.predict(x_g)

            # train D on the real and the generated batch; average the losses
            d_loss_real = d.train_on_batch(x_d_batch, y_d_true)
            d_loss_fake = d.train_on_batch(x_d_gen, y_d_gen)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            g_loss = dcgan.train_on_batch(x_g, y_g)
            show_progress(epoch, i, g_loss[0], d_loss[0], g_loss[1], d_loss[1])

        image = combine_images(g.predict(gen_img))
        image = image * 127.5 + 127.5
        # save the preview image
        Image.fromarray(image.astype(np.uint8)).save(image_path + "%03d.png" % (epoch))
Code Example #4
File: dcganPerson.py  Project: furkan59/GANPerson
def train():
    #tf.reset_default_graph()
    x_train = loadimages()
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], CHANNEL_OF_IMAGE)

    g = generator()
    d = discriminator()

    optimize = Adam(lr=learning_rate, beta_1=0.5)
    d.trainable = True
    d.compile(loss='binary_crossentropy',
              metrics=['accuracy'],
              optimizer=optimize)

    d.trainable = False
    dcgan = Sequential([g, d])
    dcgan.compile(loss='binary_crossentropy',
                  metrics=['accuracy'],
                  optimizer=optimize)

    num_batches = x_train.shape[0] // batch_size
    gen_img = np.array([np.random.uniform(-1, 1, INPUT_DIM_START) for _ in range(49)])
    y_d_true = [1] * batch_size
    y_d_gen = [0] * batch_size
    y_g = [1] * batch_size

    for epoch in range(num_epoch):
        for i in range(num_batches):
            x_d_batch = x_train[i*batch_size:(i+1)*batch_size]
            x_g = np.array([np.random.normal(0, 0.5, INPUT_DIM_START) for _ in range(batch_size)])
            x_d_gen = g.predict(x_g)

            d_loss_real = d.train_on_batch(x_d_batch, y_d_true)
            d_loss_fake = d.train_on_batch(x_d_gen, y_d_gen)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)  # average real/fake losses

            g_loss = dcgan.train_on_batch(x_g, y_g)
            show_progress(epoch, i, g_loss[0], d_loss[0], g_loss[1], d_loss[1])

        image = combine_images(g.predict(gen_img))
        image = image * 127.5 + 127.5
        Image.fromarray(image.astype(np.uint8)).save(image_path + "%03d.png" % (epoch))
Code Example #5
# Accuracy score for bag-of-words features
mnb_bow_score = accuracy_score(y_test, mnb_bow_predict)
print("mnb_bow_score :", mnb_bow_score)
# Accuracy score for TF-IDF features
mnb_tfidf_score = accuracy_score(y_test, mnb_tfidf_predict)
print("mnb_tfidf_score :", mnb_tfidf_score)

model1 = Sequential()
model1.add(Dense(units=75, activation='relu', input_dim=cv_train_reviews.shape[1]))
model1.add(Dense(units=50, activation='relu'))
model1.add(Dense(units=25, activation='relu'))
model1.add(Dense(units=10, activation='relu'))
model1.add(Dense(units=1, activation='sigmoid'))
model1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# note: train_on_batch performs a single gradient update on the whole array;
# model1.fit(...) over several epochs is the usual choice here
model1.train_on_batch(cv_train_reviews, y_train)

# test_on_batch returns [loss, accuracy]; index 1 is the accuracy
mlp_test_metrics = model1.test_on_batch(cv_test_reviews, y_test)

mlp_test_metrics[1]

from xgboost import XGBClassifier

# XGBoost classifier (model2)
model2=XGBClassifier(learning_rate=0.1, n_estimators=140, max_depth=5,
                        min_child_weight=3, gamma=0.2, subsample=0.6, colsample_bytree=1.0,
                        objective='binary:logistic', nthread=4, scale_pos_weight=1, seed=27)

model2.fit(tv_train_reviews, y_train, eval_metric='auc')

preds = model2.predict(tv_test_reviews)
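
For completeness, the XGBoost predictions can be scored the same way as the
Naive Bayes models above, reusing accuracy_score from the earlier lines:

# score the XGBoost predictions against the held-out labels
xgb_score = accuracy_score(y_test, preds)
print("xgb_score :", xgb_score)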
Code Example #6
class GanTrainer:
    def __init__(self, out_dir):
        check_dir_creation(out_dir)
        self.out_dir = out_dir

        # Gan Parameters
        self.generator = None
        self.discriminator = None
        self.gan = None

        self.latent_dim = None

        # Data Parameters
        self.x_train, self.y_train = None, None
        self.x_val, self.y_val = None, None
        self.x_test, self.y_test = None, None

        self.x_train_clf, self.y_train_clf = None, None
        self.x_val_clf, self.y_val_clf = None, None
        self.x_test_clf, self.y_test_clf = None, None

        self.x_train_activity = None

        # Data Processing
        self.act_id = None
        self.window_size = None
        self.step_size = None
        self.col_names = None
        self.method = None
        self.input_cols_train = None
        self.input_cols_eval = None

        # eval
        self.best_train_f1_score = 0.0
        self.best_val_f1_score = 0.0

        self.orig_svm_clf = None
        self.orig_train_acc = None
        self.orig_train_f1_score = None

        self.orig_val_acc = None
        self.orig_val_f1_score = None

    def set_out_dir(self, out_dir):
        check_dir_creation(out_dir)
        self.out_dir = out_dir

    def create_gan(self, generator, discriminator, optimizer=Adam()):
        self.latent_dim = generator.input.shape[1]

        self.generator = generator
        self.discriminator = discriminator
        self.discriminator.trainable = False

        self.gan = Sequential()
        self.gan.add(generator)
        self.gan.add(discriminator)
        self.gan.compile(loss='binary_crossentropy', optimizer=optimizer)

    def init_data(
        self,
        train_path,
        val_path,
        test_path,
        act_id,
        window_size=5 * 50,
        step_size=int(5 * 50 / 2),
        col_names=[
            'userAcceleration.x', 'userAcceleration.y', 'userAcceleration.z',
            'userAcceleration.c'
        ],
        method='sliding',
        input_cols_train=[
            'userAcceleration.x', 'userAcceleration.y', 'userAcceleration.z'
        ],
        input_cols_eval=['userAcceleration.c'],
    ):
        self.act_id = act_id
        self.window_size = window_size
        self.step_size = step_size
        self.col_names = col_names
        self.method = method
        self.input_cols_train = input_cols_train
        self.input_cols_eval = input_cols_eval

        print('Load Data...')
        train_df = pd.read_hdf(train_path)
        val_df = pd.read_hdf(val_path)
        test_df = pd.read_hdf(test_path)

        train_windowed_df = windowing_dataframe(train_df,
                                                window_size=window_size,
                                                step_or_sample_size=step_size,
                                                col_names=col_names,
                                                method=method)
        val_windowed_df = windowing_dataframe(val_df,
                                              window_size=window_size,
                                              step_or_sample_size=step_size,
                                              col_names=col_names,
                                              method=method)
        test_windowed_df = windowing_dataframe(test_df,
                                               window_size=window_size,
                                               step_or_sample_size=step_size,
                                               col_names=col_names,
                                               method=method)

        print('Transform Data...')
        self.x_train, self.y_train = transform_windows_df(
            train_windowed_df,
            input_cols=input_cols_train,
            one_hot_encode=False,
            as_channel=False)
        self.x_val, self.y_val = transform_windows_df(
            val_windowed_df,
            input_cols=input_cols_train,
            one_hot_encode=False,
            as_channel=False)
        self.x_test, self.y_test = transform_windows_df(
            test_windowed_df,
            input_cols=input_cols_train,
            one_hot_encode=False,
            as_channel=False)

        x_train_clf, self.y_train_clf = transform_windows_df(
            train_windowed_df,
            input_cols=input_cols_eval,
            one_hot_encode=False,
            as_channel=False)
        x_val_clf, self.y_val_clf = transform_windows_df(
            val_windowed_df,
            input_cols=input_cols_eval,
            one_hot_encode=False,
            as_channel=False)
        x_test_clf, self.y_test_clf = transform_windows_df(
            test_windowed_df,
            input_cols=input_cols_eval,
            one_hot_encode=False,
            as_channel=False)

        self.x_train_clf = x_train_clf.reshape((len(x_train_clf), window_size))
        self.x_val_clf = x_val_clf.reshape((len(x_val_clf), window_size))
        self.x_test_clf = x_test_clf.reshape((len(x_test_clf), window_size))

        self.x_train_activity, _ = filter_by_activity_index(
            x=self.x_train, y=self.y_train, activity_idx=self.act_id)

        print('Calculate origin performance...')
        self.calc_origin_train_val_performance()
        print('Done!')

    def calc_origin_train_val_performance(self, verbose=False):
        if (self.x_train_clf is None) or (self.x_val_clf is None):
            print('Please run method: init_data first.')
            return

        self.orig_svm_clf = SVC()
        self.orig_svm_clf.fit(self.x_train_clf, self.y_train_clf)

        y_train_head = self.orig_svm_clf.predict(self.x_train_clf)
        self.orig_train_acc = accuracy_score(self.y_train_clf, y_train_head)
        self.orig_train_f1_score = f1_score(self.y_train_clf,
                                            y_train_head,
                                            average=None)[self.act_id]

        y_val_head = self.orig_svm_clf.predict(self.x_val_clf)
        self.orig_val_acc = accuracy_score(self.y_val_clf, y_val_head)
        self.orig_val_f1_score = f1_score(self.y_val_clf,
                                          y_val_head,
                                          average=None)[self.act_id]

        if verbose:
            print('Original training acc: ', self.orig_train_acc)
            print('Original training f1_score for act_id ', self.act_id, ': ',
                  self.orig_train_f1_score, '\n')
            print('Original validation acc: ', self.orig_val_acc)
            print('Original validation f1_score for act_id ', self.act_id,
                  ': ', self.orig_val_f1_score, '\n')

    def train_gan(self,
                  steps,
                  batch_size=64,
                  eval_step=100,
                  random=False,
                  label_smoothing=False):
        if (self.gan is None) and (self.x_train_activity is None):
            print('Please run method "create_gan" and "init_data" first.')
            return
        elif self.gan is None:
            print('Please run method "create_gan" first.')
            return
        elif self.x_train_activity is None:
            print('Please run method "init_data" first.')
            return

        start = 0
        for step in range(steps):
            random_latent_vectors = np.random.normal(
                size=(batch_size, self.generator.input_shape[1]))

            generated_sensor_data = self.generator.predict(
                random_latent_vectors)

            if random:
                index = np.random.choice(self.x_train_activity.shape[0],
                                         batch_size,
                                         replace=False)
                real_sensor_data = self.x_train_activity[index]
            else:
                stop = start + batch_size
                real_sensor_data = self.x_train_activity[start:stop]
                start += batch_size

            combined_sensor_data = np.concatenate(
                [generated_sensor_data, real_sensor_data])
            if label_smoothing:
                labels = np.concatenate([
                    smooth_labels(np.zeros((batch_size, 1)), 0.0, 0.3),
                    smooth_labels(np.ones((batch_size, 1)), -0.3, 0.3)
                ])
            else:
                labels = np.concatenate(
                    [np.zeros((batch_size, 1)),
                     np.ones((batch_size, 1))])

            # shuffle data
            combined_sensor_data, labels = shuffle(combined_sensor_data,
                                                   labels)

            d_loss = self.discriminator.train_on_batch(combined_sensor_data,
                                                       labels)

            misleading_targets = np.ones((batch_size, 1))

            a_loss = self.gan.train_on_batch(random_latent_vectors,
                                             misleading_targets)

            if start > len(self.x_train_activity) - batch_size:
                start = 0

            if step % eval_step == 0:
                save = False
                print('discriminator loss:', d_loss)
                print('adversarial loss:', a_loss)
                print('\n')

                gen_train_f1_score, gen_val_f1_score = self.eval()

                if gen_train_f1_score > self.best_train_f1_score:
                    print('Train f1-score for act_id ', self.act_id,
                          'improved from ', self.best_train_f1_score, 'to ',
                          gen_train_f1_score)
                    self.best_train_f1_score = gen_train_f1_score
                    save = True
                if gen_val_f1_score > self.best_val_f1_score:
                    print('Validation f1-score for act_id ', self.act_id,
                          'improved from ', self.best_val_f1_score, 'to ',
                          gen_val_f1_score)
                    self.best_val_f1_score = gen_val_f1_score
                    save = True

                if save:
                    self.generator.save(
                        os.path.join(
                            self.out_dir,
                            'generator_{}_tf1-{}_vf1-{}.keras'.format(
                                self.act_id, self.best_train_f1_score,
                                self.best_val_f1_score)))
                    self.discriminator.save(
                        os.path.join(
                            self.out_dir,
                            'discriminator_{}_tf1-{}_vf1-{}.keras'.format(
                                self.act_id, self.best_train_f1_score,
                                self.best_val_f1_score)))

    def eval(self, percentage=0.2):
        num_gen = int(np.ceil(len(self.x_train_activity) * percentage))
        random_latent_vectors = np.random.normal(size=(num_gen,
                                                       self.latent_dim))
        generated_sensor_data = self.generator.predict(random_latent_vectors)

        gen_df = pd.DataFrame(np.array(
            [ts.transpose() for ts in generated_sensor_data]).tolist(),
                              columns=[
                                  'userAcceleration.x', 'userAcceleration.y',
                                  'userAcceleration.z'
                              ])
        gen_df['userAcceleration.c'] = calc_consultant(gen_df)
        gen_df['act'] = self.act_id

        gen_windowed_df = windowing_dataframe(
            gen_df,
            window_size=self.window_size,
            step_or_sample_size=self.step_size,
            col_names=self.col_names,
            method=self.method)

        input_cols = ['userAcceleration.c']
        x_gen, y_gen = transform_windows_df(gen_windowed_df,
                                            input_cols=input_cols,
                                            one_hot_encode=False,
                                            as_channel=False)
        x_gen = x_gen.reshape((len(x_gen), self.window_size))

        x_train_gen = np.concatenate([self.x_train_clf, x_gen[:num_gen]])
        y_train_gen = np.concatenate([self.y_train_clf, y_gen[:num_gen]])

        svm_clf = SVC()
        svm_clf.fit(x_train_gen, y_train_gen)

        y_train_head = svm_clf.predict(self.x_train_clf)
        # train_acc = accuracy_score(self.y_train_clf, y_train_head)
        gen_train_f1_score = f1_score(self.y_train_clf,
                                      y_train_head,
                                      average=None)[self.act_id]

        y_val_head = svm_clf.predict(self.x_val_clf)
        # val_acc = accuracy_score(self.y_val_clf, y_val_head)
        gen_val_f1_score = f1_score(self.y_val_clf, y_val_head,
                                    average=None)[self.act_id]

        return gen_train_f1_score, gen_val_f1_score
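
train_gan calls smooth_labels, which is not shown here. A plausible sketch,
consistent with how it is called above (an assumption about the project's
helper, not its actual code):

import numpy as np

def smooth_labels(labels, low, high):
    # Hypothetical helper: GAN-style label smoothing by adding uniform
    # noise in [low, high) to each label, so e.g. real labels of 1.0
    # smoothed with (-0.3, 0.3) land in [0.7, 1.3).
    return labels + np.random.uniform(low=low, high=high, size=labels.shape)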
Code Example #7
        # (fragment: the enclosing batch loop and the definitions of `trend`,
        # `xtrain`, `ytrain`, `labels_cat`, `c` and the constants are not shown)
        signal = np.expand_dims(trend, 1)
        img = np.tile(
            signal,
            (IMAGE_WIDTH, 1, IMAGE_HEIGHT)) / (IMAGE_HEIGHT * IMAGE_WIDTH)
        img = np.expand_dims(np.transpose(img, axes=(1, 0, 2)), 3)

        xtrain[c] = np.expand_dims(
            np.random.normal(size=(LENGTH_VIDEO, IMAGE_HEIGHT, IMAGE_WIDTH)) /
            50, 3) + img
        xtrain[c] = xtrain[c] - np.mean(xtrain[c])
        ytrain[c] = labels_cat[NB_CLASSES]
        c = c + 1

    print('Train data (batch) generation done. Starting training...')

    history = model.train_on_batch(xtrain, ytrain)
    train_loss.append(history[0])
    train_acc.append(history[1])

    history = model.evaluate(xvalidation, yvalidation, verbose=2)

    # A. Save the model only if the accuracy is greater than before
    if not SAVE_ALL_MODELS:
        if batch_nb > 0:
            f1 = open(f'{RESULTS_PATH}/statistics_loss_acc.txt', 'a')

            # save model and weights if val_acc is greater than before
            if history[1] > np.max(val_acc):
                model.save_weights(f'{RESULTS_PATH}/models/weights_conv3D.h5',
                                   overwrite=True)  # save (trained) weights
                print('A new model has been saved!\n')
Code Example #8
# RNN cell
model.add(
    LSTM(CELL_SIZE,
         batch_input_shape=(None, TIME_STEPS, INPUT_SIZE),
         dropout=0.2))  # LSTM layer

model.add(Dense(OUTPUT_SIZE))  # classification output layer

model.add(Activation('softmax'))  # softmax over the output classes

# optimizer
adam = Adam(LR)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# training
for step in range(4001):
    # data shape = (batch_num, steps, inputs/outputs)
    X_batch = X_train[BATCH_INDEX:BATCH_INDEX + BATCH_SIZE, :, :]
    Y_batch = y_train[BATCH_INDEX:BATCH_INDEX + BATCH_SIZE, :]
    cost = model.train_on_batch(X_batch, Y_batch)
    BATCH_INDEX += BATCH_SIZE
    BATCH_INDEX = 0 if BATCH_INDEX >= X_train.shape[0] else BATCH_INDEX

    if step % 500 == 0:
        cost, accuracy = model.evaluate(X_test,
                                        y_test,
                                        batch_size=y_test.shape[0],
                                        verbose=False)
        print('test cost: ', cost, 'test accuracy: ', accuracy)
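
The loop above assumes X_train already has shape
(num_samples, TIME_STEPS, INPUT_SIZE) and one-hot labels. A minimal
preparation sketch under the common assumption that this is MNIST read row
by row (the constants here are illustrative, not taken from the original):

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils

TIME_STEPS = 28   # one image row per time step (assumed)
INPUT_SIZE = 28   # pixels per row (assumed)
OUTPUT_SIZE = 10  # ten digit classes (assumed)
BATCH_INDEX = 0
BATCH_SIZE = 50   # illustrative

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1, TIME_STEPS, INPUT_SIZE) / 255.0  # normalize to [0, 1]
X_test = X_test.reshape(-1, TIME_STEPS, INPUT_SIZE) / 255.0
y_train = np_utils.to_categorical(y_train, num_classes=OUTPUT_SIZE)
y_test = np_utils.to_categorical(y_test, num_classes=OUTPUT_SIZE)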