Example #1
def train_sample(model,
                 select_env,
                 sensor_locs,
                 num_sensors,
                 num_iter,
                 ad_matrix1,
                 ad_matrix2,
                 ad_matrix3,
                 ad_matrix4,
                 batch_size,
                 if_att=True):
    input_data, output_data = load_data(num_iter, select_env, num_sensors,
                                        sensor_locs)
    input_data2 = np.array(input_data)
    output_data2 = np.array(output_data)

    batch_matrix1 = np.zeros((data_per_epoch, num_sensors, num_sensors))
    batch_matrix2 = np.zeros((data_per_epoch, num_sensors, num_sensors))
    batch_matrix3 = np.zeros((data_per_epoch, num_sensors, num_sensors))
    batch_matrix4 = np.zeros((data_per_epoch, num_sensors, num_sensors))

    for j in range(data_per_epoch):
        if not if_att:
            batch_matrix1[j] = localpooling_filter(ad_matrix1)
            batch_matrix2[j] = localpooling_filter(ad_matrix2)
            batch_matrix3[j] = localpooling_filter(ad_matrix3)
            batch_matrix4[j] = localpooling_filter(ad_matrix4)
        else:
            batch_matrix1[j] = ad_matrix1
            batch_matrix2[j] = ad_matrix2
            batch_matrix3[j] = ad_matrix3
            batch_matrix4[j] = ad_matrix4

    batch_input = []
    batch_output = []
    for k in range(num_sensors):
        batch_input.append(input_data2[:, k])
        batch_output.append(output_data2[:, k])
    batch_input.extend(
        [batch_matrix1, batch_matrix2, batch_matrix3, batch_matrix4])

    history = model.fit(x=batch_input,
                        y=batch_output,
                        batch_size=batch_size,
                        epochs=1,
                        shuffle=True)
    #callbacks=[TensorBoard(log_dir='mytensorboard')])
    model.save('train_data1101/gnn_dyna_1102.h5')
    del model
    K.clear_session()
    gc.collect()
    #history = 0
    return history
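Since train_sample saves the model and then clears the Keras session, the caller has to reload the model each round. A minimal reload sketch (the custom_objects mapping is an assumption; any custom Spektral layers in the saved model must be registered there by name):

from tensorflow.keras.models import load_model
from spektral.layers import GraphConv

# Hypothetical reload between training rounds; GraphConv is assumed to be
# the only custom layer in the saved model.
model = load_model('train_data1101/gnn_dyna_1102.h5',
                   custom_objects={'GraphConv': GraphConv})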
Example #2
    def __init__(self,
                 channels,
                 adj,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super().__init__(activity_regularizer=activity_regularizer, **kwargs)
        self.channels = channels
        self.adj = adj
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False

        fltr = localpooling_filter(self.adj)
        self.fltr = sp_matrix_to_sp_tensor(fltr)
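For reference, localpooling_filter computes the symmetrically normalized GCN filter of Kipf & Welling. A minimal NumPy sketch for the dense case (the library itself also handles sparse and batched inputs):

import numpy as np

def gcn_filter_reference(A):
    # A_hat = A + I, then symmetric normalization D^(-1/2) A_hat D^(-1/2),
    # where D is the degree matrix of A_hat.
    A_hat = A + np.eye(A.shape[-1])
    d = A_hat.sum(axis=-1)
    d_inv_sqrt = np.power(d, -0.5)  # degrees are >= 1 thanks to the self-loops
    return A_hat * d_inv_sqrt[..., None, :] * d_inv_sqrt[..., :, None]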
Example #3
    def evaluate_actions(self, state, action):
        if not self.is_cnn:
            cur_obs = np.zeros((len(state), 10, 84, 336, 3))
            ad_matrix1, ad_matrix2, ad_matrix3, ad_matrix4 = [], [], [], []
            for j in range(len(state)):
                new_state = form_obs(state[j], self.sensor_obs)
                cur_obs[j] = new_state
                robot_pos = [-state[j][-1][0][5], 0.5, state[j][-1][0][3]]
                new_matrix1, new_matrix2, new_matrix3, new_matrix4 = cal_admatrix(
                    robot_pos, self.env_index)
                if not self.is_att:
                    ad_matrix1.append(localpooling_filter(new_matrix1))
                    ad_matrix2.append(localpooling_filter(new_matrix2))
                    ad_matrix3.append(localpooling_filter(new_matrix3))
                    ad_matrix4.append(localpooling_filter(new_matrix4))
                else:
                    ad_matrix1.append(new_matrix1)
                    ad_matrix2.append(new_matrix2)
                    ad_matrix3.append(new_matrix3)
                    ad_matrix4.append(new_matrix4)
            output, value = self.policy([
                cur_obs[:, 0], cur_obs[:, 1], cur_obs[:, 2], cur_obs[:, 3],
                cur_obs[:, 4], cur_obs[:, 5], cur_obs[:, 6], cur_obs[:, 7],
                cur_obs[:, 8], cur_obs[:, 9], ad_matrix1, ad_matrix2,
                ad_matrix3, ad_matrix4
            ])
        else:
            cur_obs = np.zeros((len(state), 1, 84, 336, 3))
            for j in range(len(state)):
                new_state = form_obs(state[j])
                cur_obs[j] = new_state
                robot_pos = [-state[j][-1][0][5], 0.5, state[j][-1][0][3]]
            output, value = self.policy_cnn([cur_obs[:, 0]])

        #output = tf.clip_by_value(output, -1, 1)
        dist = self.get_dist(tf.cast(output, dtype=tf.float32))
        if not self.discrete:
            action = (action - self.action_shift) / self.action_bound

        log_probs = dist.log_prob(action)
        #action = tf.clip_by_value(action, -1, 1)
        if not self.discrete:
            log_probs = tf.reduce_sum(log_probs, axis=-1)

        entropy = dist.entropy()

        return log_probs, entropy, value
Example #4
def train_sample(model,
                 select_env,
                 num_sensors=10,
                 num_iter=train_iter,
                 if_att=False):
    input_data, output_data, ad_matrix1, ad_matrix2, ad_matrix3, ad_matrix4 = sample_batch(
        num_iter, select_env, num_sensors)

    batch_input = np.array(input_data)
    batch_output = np.array(output_data)
    batch_matrix1 = np.zeros((batch_size, num_sensors, num_sensors))
    batch_matrix2 = np.zeros((batch_size, num_sensors, num_sensors))
    batch_matrix3 = np.zeros((batch_size, num_sensors, num_sensors))
    batch_matrix4 = np.zeros((batch_size, num_sensors, num_sensors))

    for j in range(batch_size):
        if not if_att:
            batch_matrix1[j] = localpooling_filter(ad_matrix1)
            batch_matrix2[j] = localpooling_filter(ad_matrix2)
            batch_matrix3[j] = localpooling_filter(ad_matrix3)
            batch_matrix4[j] = localpooling_filter(ad_matrix4)
        else:
            batch_matrix1[j] = ad_matrix1
            batch_matrix2[j] = ad_matrix2
            batch_matrix3[j] = ad_matrix3
            batch_matrix4[j] = ad_matrix4

    history = model.fit(x=[
        batch_input[:, 0], batch_input[:, 1], batch_input[:, 2],
        batch_input[:, 3], batch_input[:, 4], batch_input[:, 5],
        batch_input[:, 6], batch_input[:, 7], batch_input[:, 8],
        batch_input[:, 9], batch_matrix1, batch_matrix2, batch_matrix3,
        batch_matrix4
    ],
                        y=[
                            batch_output[:, 0], batch_output[:, 1],
                            batch_output[:, 2], batch_output[:, 3],
                            batch_output[:, 4], batch_output[:, 5],
                            batch_output[:, 6], batch_output[:, 7],
                            batch_output[:, 8], batch_output[:, 9]
                        ],
                        batch_size=batch_size,
                        epochs=1,
                        shuffle=True)
    #callbacks=[TensorBoard(log_dir='mytensorboard')])
    return model, history
Example #5
def train_sample():
    init_lr = 3e-5
    for num_iter in range(train_iter):
        print('start_training round:', num_iter)
        if num_iter == int(train_iter / 4):
            new_lr = init_lr / 10
            model.compile(optimizer=Adam(learning_rate=new_lr), loss='mse')
            print('new_learning:', new_lr)
        if num_iter == int(train_iter / 2):
            new_lr = init_lr / 100
            model.compile(optimizer=Adam(learning_rate=new_lr), loss='mse')
            print('new_learning:', new_lr)
        if num_iter == int(train_iter / 1.3):
            new_lr = init_lr / 1000
            model.compile(optimizer=Adam(learning_rate=new_lr), loss='mse')
            print('new_learning:', new_lr)

        input_data, output_data, ad_matrix1, ad_matrix2, ad_matrix3, ad_matrix4 = sample_batch(batch_size)

        for i in range(4):
            batch_input = np.array(input_data[i])
            batch_output = np.array(output_data[i])
            batch_matrix1 = np.zeros((batch_size, num_sensors, num_sensors))
            batch_matrix2 = np.zeros((batch_size, num_sensors, num_sensors))
            batch_matrix3 = np.zeros((batch_size, num_sensors, num_sensors))
            batch_matrix4 = np.zeros((batch_size, num_sensors, num_sensors))
            
            for j in range(batch_size):
                batch_matrix1[j] = localpooling_filter(ad_matrix1[i])
                batch_matrix2[j] = localpooling_filter(ad_matrix2[i])
                batch_matrix3[j] = localpooling_filter(ad_matrix3[i])
                batch_matrix4[j] = localpooling_filter(ad_matrix4[i])
            history = model.fit(x=[batch_input[:, 0], batch_input[:, 1], batch_input[:, 2], batch_input[:, 3],
                                   batch_input[:, 4], batch_input[:, 5], batch_input[:, 6], batch_input[:, 7], batch_input[:, 8],
                                   batch_matrix1, batch_matrix2, batch_matrix3, batch_matrix4],
                                y=[batch_output[:, 0], batch_output[:, 1], batch_output[:, 2], batch_output[:, 3],
                                   batch_output[:, 4], batch_output[:, 5], batch_output[:, 6], batch_output[:, 7], batch_output[:, 8]],
                                batch_size=batch_size, epochs=1, shuffle=True)
                        #callbacks=[TensorBoard(log_dir='mytensorboard')])
            hist_df = pd.DataFrame(history.history)
            hist_csv_file = 'history0910.csv'
            with open(hist_csv_file, mode='a') as f:
                hist_df.to_csv(f) 
        if num_iter % 500 == 100:
            print('save_model')
            model.save('gnn_0911.h5')
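Note that recompiling inside the loop, as above, also resets Adam's moment estimates at each rate drop. A sketch of the same staged schedule expressed as an optimizer schedule instead, which keeps the optimizer state intact (this assumes one optimizer step per training round, which holds above because each fit() call sees exactly one batch):

from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers.schedules import PiecewiseConstantDecay

# Same staged drop as the if-blocks above, without recompiling mid-training.
init_lr = 3e-5
lr_schedule = PiecewiseConstantDecay(
    boundaries=[train_iter // 4, train_iter // 2, int(train_iter / 1.3)],
    values=[init_lr, init_lr / 10, init_lr / 100, init_lr / 1000])
model.compile(optimizer=Adam(learning_rate=lr_schedule), loss='mse')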
Example #6
    def act(self, state, test=False):
        if not self.is_cnn:
            robot_pos = [-state[-1][0][5], 0.5, state[-1][0][3]]
            cur_obs = form_obs(state)
            #cur_obs = np.expand_dims(state, axis=0).astype(np.float32)
            ad_matrix1, ad_matrix2, ad_matrix3, ad_matrix4 = cal_admatrix(
                robot_pos, self.env_index)
            if not self.is_att:
                ad_matrix1, ad_matrix2 = localpooling_filter(
                    ad_matrix1), localpooling_filter(ad_matrix2)
                ad_matrix3, ad_matrix4 = localpooling_filter(
                    ad_matrix3), localpooling_filter(ad_matrix4)
            output, value = self.policy.predict([
                np.expand_dims(cur_obs[0], axis=0),
                np.expand_dims(cur_obs[1], axis=0),
                np.expand_dims(cur_obs[2], axis=0),
                np.expand_dims(cur_obs[3], axis=0),
                np.expand_dims(cur_obs[4], axis=0),
                np.expand_dims(cur_obs[5], axis=0),
                np.expand_dims(cur_obs[6], axis=0),
                np.expand_dims(cur_obs[7], axis=0),
                np.expand_dims(cur_obs[8], axis=0),
                np.expand_dims(cur_obs[9], axis=0), ad_matrix1, ad_matrix2,
                ad_matrix3, ad_matrix4
            ])
        else:
            robot_pos = [-state[-1][0][5], 0.5, state[-1][0][3]]
            cur_obs = form_obs(state)
            output, value = self.policy_cnn.predict([cur_obs])
        output = tf.clip_by_value(output, -1, 1)
        dist = self.get_dist(output)

        if self.discrete:
            action = tf.math.argmax(output, axis=-1) if test else dist.sample()
            log_probs = dist.log_prob(action)
        else:
            action = output if test else dist.sample()
            action = tf.clip_by_value(action, -1, 1)
            #action = action.astype(np.float32)
            log_probs = tf.reduce_sum(dist.log_prob(action), axis=-1)
            action = action * self.action_bound + self.action_shift
        return action[0].numpy(), value[0][0], log_probs[0].numpy()
Example #7
# Log variables
log(__file__)
vars_to_log = [
    'SEED', 'N_SAMPLES_IN_BASE', 'N_SAMPLES_IN_CLASS', 'N', 'F',
    'latent_space', 'radius', 'sigma', 'learning_rate', 'epochs', 'batch_size',
    'es_patience', 'live_classes', 'optimizer', 'losses'
]
log(''.join('- {}: {}\n'.format(v, str(eval(v))) for v in vars_to_log))

# Data normalization
print('Preprocessing data.')
ss = StandardScaler()
nf = ss.fit_transform(nf.reshape(-1, F)).reshape(-1, N, F)
nf_live = ss.transform(nf_live.reshape(-1, F)).reshape(-1, N, F)
fltr = localpooling_filter(adj)
fltr_live = localpooling_filter(adj_live)

# Train/test split
adj_train, adj_test, \
fltr_train, fltr_test, \
nf_train, nf_test = train_test_split(adj, fltr, nf, test_size=0.1)

# Train/val split
adj_train, adj_val, \
fltr_train, fltr_val, \
nf_train, nf_val = train_test_split(adj_train, fltr_train, nf_train, test_size=0.1)

# Autoencoder
model = GAE_CCM(N,
                F,
Example #8
# Load data
adj, x, y = delaunay.generate_data(return_type='numpy', classes=[0, 5])

# Parameters
N = x.shape[-2]  # Number of nodes in the graphs
F = x.shape[-1]  # Original feature dimensionality
n_classes = y.shape[-1]  # Number of classes
l2_reg = 5e-4  # Regularization rate for l2
learning_rate = 1e-3  # Learning rate for Adam
epochs = 200  # Number of training epochs
batch_size = 32  # Batch size
es_patience = 10  # Patience for early stopping
log_dir = init_logging()  # Create log directory and file

# Preprocessing
fltr = localpooling_filter(adj.copy())

# Train/test split
fltr_train, fltr_test, \
x_train, x_test,       \
y_train, y_test = train_test_split(fltr, x, y, test_size=0.1)

# Model definition
X_in = Input(shape=(N, F))
filter_in = Input((N, N))

gc1 = GraphConv(32, activation='relu',
                kernel_regularizer=l2(l2_reg))([X_in, filter_in])
gc2 = GraphConv(32, activation='relu',
                kernel_regularizer=l2(l2_reg))([gc1, filter_in])
pool = GlobalAttentionPool(128)(gc2)
Example #9
def preprocess(A):
    return localpooling_filter(A)
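A toy usage sketch for the hook above (the 3-node path-graph adjacency is made up for illustration):

import numpy as np

A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]], dtype=float)
fltr = preprocess(A)  # normalized GCN filter, same shape as A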
Example #10
def train_sample(if_att=False):
    init_lr = 3e-5
    for num_iter in range(train_iter):
        print('start_training round:', num_iter)
        if num_iter == int(train_iter / 4):
            new_lr = init_lr / 10
            model.compile(optimizer=Adam(learning_rate=new_lr), loss='mse')
            print('new_learning:', new_lr)
        if num_iter == int(train_iter / 2):
            new_lr = init_lr / 100
            model.compile(optimizer=Adam(learning_rate=new_lr), loss='mse')
            print('new_learning:', new_lr)
        if num_iter == int(train_iter / 1.3):
            new_lr = init_lr / 1000
            model.compile(optimizer=Adam(learning_rate=new_lr), loss='mse')
            print('new_learning:', new_lr)

        input_data, output_data, ad_matrix1, ad_matrix2, ad_matrix3, ad_matrix4, num_sensors = sample_batch(
            batch_size)

        for i in range(1):
            #  num of different batches, currently is 1
            '''
            if num_sensors == 9:
                for z_i in range(len(input_data[i])):    
                    input_data[i][z_i] = np.concatenate((input_data[i][z_i], np.zeros((1,84,336,3))), axis=0 )
                    output_data[i][z_i] = np.concatenate((output_data[i][z_i], np.zeros((1,2))), axis=0)
                num_sensors = 10
                ad_matrix1[i] = np.concatenate((ad_matrix1[i], np.zeros((1,9))), axis=0)
                ad_matrix1[i] = np.concatenate((ad_matrix1[i], np.zeros((10,1))), axis=-1)
                ad_matrix2[i] = np.concatenate((ad_matrix2[i], np.zeros((1,9))), axis=0)
                ad_matrix2[i] = np.concatenate((ad_matrix2[i], np.zeros((10,1))), axis=-1)
                ad_matrix3[i] = np.concatenate((ad_matrix3[i], np.zeros((1,9))), axis=0)
                ad_matrix3[i] = np.concatenate((ad_matrix3[i], np.zeros((10,1))), axis=-1)
                ad_matrix4[i] = np.concatenate((ad_matrix4[i], np.zeros((1,9))), axis=0)
                ad_matrix4[i] = np.concatenate((ad_matrix4[i], np.zeros((10,1))), axis=-1)     
            '''

            batch_input = np.array(input_data[i])
            batch_output = np.array(output_data[i])
            batch_matrix1 = np.zeros((batch_size, num_sensors, num_sensors))
            batch_matrix2 = np.zeros((batch_size, num_sensors, num_sensors))
            batch_matrix3 = np.zeros((batch_size, num_sensors, num_sensors))
            batch_matrix4 = np.zeros((batch_size, num_sensors, num_sensors))

            for j in range(batch_size):
                if not if_att:
                    batch_matrix1[j] = localpooling_filter(ad_matrix1[i])
                    batch_matrix2[j] = localpooling_filter(ad_matrix2[i])
                    batch_matrix3[j] = localpooling_filter(ad_matrix3[i])
                    batch_matrix4[j] = localpooling_filter(ad_matrix4[i])
                else:
                    batch_matrix1[j] = ad_matrix1[i]
                    batch_matrix2[j] = ad_matrix2[i]
                    batch_matrix3[j] = ad_matrix3[i]
                    batch_matrix4[j] = ad_matrix4[i]

            history = model.fit(
                x=[
                    batch_input[:, 0],
                    batch_input[:, 1],
                    batch_input[:, 2],
                    batch_input[:, 3],
                    batch_input[:, 4],
                    batch_input[:, 5],
                    batch_input[:, 6],
                    batch_input[:, 7],
                    batch_input[:, 8],  #batch_input[:,9], 
                    batch_matrix1,
                    batch_matrix2,
                    batch_matrix3,
                    batch_matrix4
                ],
                y=[
                    batch_output[:, 0],
                    batch_output[:, 1],
                    batch_output[:, 2],
                    batch_output[:, 3],
                    batch_output[:, 4],
                    batch_output[:, 5],
                    batch_output[:, 6],
                    batch_output[:, 7],
                    batch_output[:, 8],  #batch_output[:,9]
                ],
                batch_size=batch_size,
                epochs=1,
                shuffle=True)
            #callbacks=[TensorBoard(log_dir='mytensorboard')])
            hist_df = pd.DataFrame(history.history)
            hist_csv_file = 'history1002_att.csv'
            with open(hist_csv_file, mode='a') as f:
                hist_df.to_csv(f)
        if num_iter % 500 == 100:
            print('save_model')
            model.save('gnn_1002_att.h5')
Example #11
def main(time_train,
         epochs,
         C_i,
         C_o,
         learning_rate,
         kernel,
         regenerate=False):
    os.chdir(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])

    if (not os.path.exists('/media/data6TB/spandan/data.p')) or regenerate:
        from data_utils import load_data
        X, A, E = load_data(
            DATASET='data-all.json', R=300, SIGMA=1
        )  # Shapes: (171, 640, 3) (171, 640, 640) (171, 640, 640, 1)
        with open('/media/data6TB/spandan/data.p', 'wb') as pkl:
            pickle.dump((X, A, E), pkl)

    else:
        with open('/media/data6TB/spandan/data.p', 'rb') as pkl:
            X, A, E = pickle.load(pkl)

    districts = A.shape[1]

    # Inputs
    X_in = Input(shape=(districts, C_i), batch_size=time_train)
    E_in = Input(shape=(districts, districts, 1), batch_size=time_train)
    A_in = Input(shape=(districts, districts), batch_size=time_train)

    # Block
    X_i0 = tf.transpose(tf.expand_dims(X_in, axis=0), perm=[0, 2, 1, 3])
    l1 = GLU(filters=2 * C_o, kernelsize=kernel)(X_i0)
    X_i1 = tf.squeeze(tf.transpose(l1, perm=[0, 2, 1, 3]))
    E_i1 = E_in[:X_i1.shape[0], :, :, :]
    A_i1 = A_in[:X_i1.shape[0], :, :]
    l1 = GraphConv(channels=C_i, activation='relu')([X_i1, A_i1, E_i1])
    l1 = tf.expand_dims(tf.transpose(l1, perm=[1, 0, 2]), axis=0)
    l1 = GLU(filters=2 * C_o, kernelsize=kernel)(l1)

    # Block
    l2 = GLU(filters=2 * C_o, kernelsize=kernel)(l1)
    X_i2 = tf.squeeze(tf.transpose(l2, perm=[0, 2, 1, 3]))
    E_i2 = E_in[:X_i2.shape[0], :, :, :]
    A_i2 = A_in[:X_i2.shape[0], :, :]
    l2 = GraphConv(channels=C_i, activation='relu')([X_i2, A_i2, E_i2])
    l2 = tf.expand_dims(tf.transpose(l2, perm=[1, 0, 2]), axis=0)
    l2 = GLU(filters=2 * C_o, kernelsize=kernel)(l2)

    # Output layer
    l3 = GLU(filters=2 * C_i, kernelsize=(time_train - 4 * (kernel - 1)))(l2)
    X_i3 = tf.squeeze(tf.transpose(l3, perm=[0, 2, 1, 3]))
    final_output = nstack(Dense(C_i)(X_i3), time_train)

    model = Model(inputs=[X_in, E_in, A_in], outputs=final_output)
    optimizer = RMSprop(learning_rate=learning_rate)
    model.compile(optimizer=optimizer,
                  loss='mean_squared_error',
                  weighted_metrics=['acc'])
    model.summary()

    X_input = X[:time_train, :, :]
    E_input = E[:time_train, :, :, :]
    A_input = localpooling_filter((A[:time_train, :, :]).numpy(),
                                  symmetric=True)
    output = nstack(tf.squeeze(X[time_train, :, :]), time_train)

    model.fit([X_input, E_input, A_input],
              output,
              shuffle=False,
              epochs=epochs)
Example #12
trace_types = {  # label map (opening line reconstructed from its use in condense_and_plot below)
    'filezilla': 0,
    'spotifyOnline': 1,
    'VLC': 2,
    'skype': 3,
    'spotifyOffline': 4,
    'winscp': 5,
    'winrar': 6
}

print("started loading saved model")
model = tensorflow.saved_model.load('trained')
print("finished loading model")

Anew, Xnew, _, _, _, _ = prep(input_data)
Anew_raw = Anew.toarray()  # array form of adj
Anew = utils.localpooling_filter(Anew).astype('f4')
Xnew = tensorflow.convert_to_tensor(Xnew, float)
Anew = tensorflow.convert_to_tensor(Anew.toarray(), float)
out = model([Xnew, Anew])

pred = out.numpy()  # avoid shadowing the conventional NumPy alias `np`

pkl.dump(pred, open("prediction.pkl", 'wb'))

condense_and_plot(
    input_data,
    Anew_raw,
    out.numpy().tolist(),
    trace_types,
    #                  [x/20 for x in range(21)],
    [0.45],
Example #13
dataset = 'cora'
A, X, y, train_mask, val_mask, test_mask = citation.load_data(dataset)

# Parameters
channels = 16  # Number of channels in the first layer
N = X.shape[0]  # Number of nodes in the graph
F = X.shape[1]  # Original feature dimensionality
n_classes = y.shape[1]  # Number of classes
dropout = 0.5  # Dropout rate applied to the features
l2_reg = 5e-4  # Regularization rate for l2
learning_rate = 1e-2  # Learning rate for SGD
epochs = 20000  # Number of training epochs
es_patience = 200  # Patience for early stopping

# Preprocessing operations
fltr = localpooling_filter(A)

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = GraphConv(channels,
                         activation='relu',
                         kernel_regularizer=l2(l2_reg),
                         use_bias=False)([dropout_1, fltr_in])
dropout_2 = Dropout(dropout)(graph_conv_1)
graph_conv_2 = GraphConv(n_classes, activation='softmax',
                         use_bias=False)([dropout_2, fltr_in])

# Build model
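The snippet stops at its "# Build model" comment. A plausible continuation, modeled on Spektral's standard citation-network example (the optimizer choice and the masked single-batch fit are assumptions, not part of the original):

from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

# Hypothetical continuation: build and train on the single citation graph,
# using the masks returned by citation.load_data above.
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
model.compile(optimizer=Adam(learning_rate=learning_rate),
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

model.fit([X, fltr],
          y,
          sample_weight=train_mask,
          epochs=epochs,
          batch_size=N,
          validation_data=([X, fltr], y, val_mask),
          shuffle=False,  # single-graph training: the data order is fixed
          callbacks=[EarlyStopping(monitor='val_loss', patience=es_patience)])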
Example #14
def train_khop_share():
    init_lr = 3e-5
    for num_iter in range(train_iter):
        print('start_training round:', num_iter)
        if num_iter == int(train_iter / 4):
            new_lr = init_lr / 10
            model.compile(optimizer=Adam(learning_rate=new_lr), loss='mse')
            print('new_learning:', new_lr)
        if num_iter == int(train_iter / 2):
            new_lr = init_lr / 100
            model.compile(optimizer=Adam(learning_rate=new_lr), loss='mse')
            print('new_learning:', new_lr)
        if num_iter == int(train_iter / 1.3):
            new_lr = init_lr / 1000
            model.compile(optimizer=Adam(learning_rate=new_lr), loss='mse')
            print('new_learning:', new_lr)
        # randomly select a batch of samples
        select_group = np.random.randint(1,total_group1+1)
        select_group2 = np.random.randint(1,total_group2+1)
        print('select_env3_:', select_group,  '    select_env4_:', select_group2)
        label_path_env3 = "target_env3_{}.txt".format(select_group)
        ad_matrix_env3 = np.load('ad_matrix_env3_{}.npy'.format(select_group))
        ad_matrix2_env3 = np.load('ad_matrix2_env3_{}.npy'.format(select_group))

        label_path_env4 = "target_env4_{}.txt".format(select_group2)
        ad_matrix_env4 = np.load('ad_matrix_env4_{}.npy'.format(select_group2))
        ad_matrix2_env4 = np.load('ad_matrix2_env4_{}.npy'.format(select_group2))
        
        filePath_env3 = 'training_env3_{}/sensor_1/1'.format(select_group)
        filelist_env3 = os.listdir(filePath_env3)
        filelist_env3.sort(key=lambda x: int(x[:-4]))

        filePath_env4 = 'training_env4_{}/sensor_1/1'.format(select_group2)
        filelist_env4 = os.listdir(filePath_env4)
        filelist_env4.sort(key=lambda x: int(x[:-4]))

        batch_matrix1_env3 = np.zeros((batch_size, num_sensors, num_sensors))
        batch_matrix2_env3 = np.zeros((batch_size, num_sensors, num_sensors))
        batch_matrix1_env4 = np.zeros((batch_size, num_sensors, num_sensors))
        batch_matrix2_env4 = np.zeros((batch_size, num_sensors, num_sensors))
        for i in range(batch_size):
            batch_matrix1_env3[i] = localpooling_filter(ad_matrix_env3)
            batch_matrix2_env3[i] = localpooling_filter(ad_matrix2_env3)
            batch_matrix1_env4[i] = localpooling_filter(ad_matrix_env4)
            batch_matrix2_env4[i] = localpooling_filter(ad_matrix2_env4)
        batch_matrix1 = np.concatenate((batch_matrix1_env3, batch_matrix1_env4), axis=0)
        batch_matrix2 = np.concatenate((batch_matrix2_env3, batch_matrix2_env4), axis=0)
        
        with open(label_path_env3, "r") as target_label_env3:
            lines_env3 = target_label_env3.readlines()
        with open(label_path_env4, "r") as target_label_env4:
            lines_env4 = target_label_env4.readlines()
        #select_case_env3 = np.arange(batch_size)
        select_case_env3 = [np.random.randint(len(lines_env3)) for _ in range(batch_size)]
        select_case_env4 = [np.random.randint(len(lines_env4)) for _ in range(batch_size)]
        
        batch_input_env3 = []
        batch_input_env4 = []
        batch_output_env3 = s_label_batch(select_group, select_case_env3, 3)
        batch_output_env4 = s_label_batch(select_group2, select_case_env4, 4)
        
        for i in range(batch_size):
            ####### for env3
            all_sensor_input_env3 = np.zeros((num_sensors, 84, 84*4, 3))
            for idx_sensor in range(num_sensors):
                sensor_path = 'training_env3_{}/'.format(select_group) + all_sensors[idx_sensor]
                # place image k (k = 1..4) into the k-th 84-pixel-wide slot from the right
                for idx_img in range(4):
                    img = image.load_img(sensor_path + '/{}/'.format(idx_img + 1) + filelist_env3[select_case_env3[i]],
                                         target_size=(84, 84))  # height-width
                    img_array = image.img_to_array(img)
                    all_sensor_input_env3[idx_sensor, :, 84*(3-idx_img):84*(4-idx_img), :] = img_array / 255
            batch_input_env3.append(all_sensor_input_env3.copy())
            
        for i in range(batch_size):
            ####### for env4
            all_sensor_input_env4 = np.zeros((num_sensors, 84, 84*4, 3))
            for idx_sensor in range(num_sensors):
                sensor_path = 'training_env4_{}/'.format(select_group2) + all_sensors[idx_sensor]
                # place image k (k = 1..4) into the k-th 84-pixel-wide slot from the right
                for idx_img in range(4):
                    img = image.load_img(sensor_path + '/{}/'.format(idx_img + 1) + filelist_env4[select_case_env4[i]],
                                         target_size=(84, 84))  # height-width
                    img_array = image.img_to_array(img)
                    all_sensor_input_env4[idx_sensor, :, 84*(3-idx_img):84*(4-idx_img), :] = img_array / 255
            batch_input_env4.append(all_sensor_input_env4.copy())
            
            #  get label data 
            #img_index = int(filelist[select_case[i]][:-4])

        batch_input = np.array(batch_input_env3 + batch_input_env4)
        batch_output = np.array(batch_output_env3 + batch_output_env4)
        history = model.fit(x=[batch_input[:, 0], batch_input[:, 1], batch_input[:, 2], batch_input[:, 3],
                               batch_input[:, 4], batch_input[:, 5], batch_input[:, 6], batch_input[:, 7], batch_input[:, 8],
                               batch_matrix1, batch_matrix2],
                            y=[batch_output[:, 0], batch_output[:, 1], batch_output[:, 2], batch_output[:, 3],
                               batch_output[:, 4], batch_output[:, 5], batch_output[:, 6], batch_output[:, 7], batch_output[:, 8]],
                            batch_size=batch_size, epochs=1, shuffle=True)
                        #callbacks=[TensorBoard(log_dir='mytensorboard')])
        hist_df = pd.DataFrame(history.history)
        hist_csv_file = 'cnn_history_mix.csv'
        with open(hist_csv_file, mode='a') as f:
            hist_df.to_csv(f) 
        if num_iter % 500 == 100:
            print('save_model')
            model.save('gnn_khop_env34_share.h5')
Example #15
print("Number of classes: {}".format(n_classes))
# print("X shape: {}".format(X))

# Model definition
X_in = Input(shape=(F, ))  # Input layer for X
A_in = Input((None, ), sparse=True)  # Input layer for A

graph_conv_1 = GraphConv(A.shape[0], activation='relu')([X_in, A_in])
graph_conv_2 = GraphConv(A.shape[0], activation='relu')([graph_conv_1, A_in])
graph_conv_7 = GraphConv(n_classes, activation='softmax')([graph_conv_2, A_in])
graph_conv_8 = GraphConv(n_classes, activation='softmax')([graph_conv_7, A_in])

# Build model
model = Model(inputs=[X_in, A_in], outputs=graph_conv_8)

A = utils.localpooling_filter(A).astype('f4')

model.compile(optimizer="rmsprop",
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

validation_data = ([X, A], y, val_mask)
history = model.fit([X, A],
                    y,
                    sample_weight=train_mask,
                    epochs=60,
                    batch_size=N,
                    validation_data=validation_data,
                    shuffle=False)
# Evaluate model
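The snippet ends at "# Evaluate model". A hedged sketch of the masked single-graph evaluation step that usually follows this fit pattern (test_mask is an assumption, loaded alongside train_mask and val_mask in this kind of example):

# Hedged sketch: evaluate on the same graph, weighting only the test nodes.
eval_results = model.evaluate([X, A],
                              y,
                              sample_weight=test_mask,
                              batch_size=N)
print('Test loss: {}\nTest accuracy: {}'.format(*eval_results))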
Example #16
        nfeat, adjacency = get_peter_graphs(N, F, T, c_m_o_, distortion)
    elif problem == 'rotation':
        nfeat, adjacency = get_rotation_graphs(N,
                                               F,
                                               T,
                                               c_m_o_,
                                               distortion,
                                               rot_type=rotation_type)
    else:
        raise ValueError('Problem can be: peter, rotation')
    np.savez(log_dir + '{}_{}_original_graph'.format(problem, c_m_o_),
             nfeat=nfeat,
             adjacency=adjacency)

    # Create filters (Laplacian)
    fltr = localpooling_filter(adjacency.copy())

    # Create regressors and targets
    adj_target = get_targets(adjacency, T, ts)
    nf_target = get_targets(nfeat, T, ts)
    fltr = get_input_sequences(fltr, T, ts)
    node_features = get_input_sequences(nfeat, T, ts)

    # Split data for sequential tests
    adj_target_seq = adj_target[T_main:]
    adj_target = adj_target[:T_main]
    nf_target_seq = nf_target[T_main:]
    nf_target = nf_target[:T_main]
    adj_seq = fltr[T_main:]
    fltr = fltr[:T_main]
    nf_seq = node_features[T_main:]