Code example #1
File: DNN.py Project: YSHI17172/2D_Entry
# Imports needed by this excerpt (the original file header is not shown):
import datetime
import json
import math
import random
import sys
import time

import numpy as np
from keras import backend as K
from keras.callbacks import EarlyStopping  # only needed if early stopping is re-enabled
from keras.layers import Dense, Input
from keras.models import Model

from TwoD_Entry_bvp import Entry


def main():

    my_date_time = '_'.join(
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S").split())

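    # NOTE: n_folds, n_repeats, batch_size, nb_epochs, my_patience, drop_rate,
    # my_optimizer, my_loss_function, dense_units and activation_hidden are
    # module-level hyperparameters defined elsewhere in DNN.py (not shown here).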
    parameters = {
        'n_folds': n_folds,
        'n_repeats': n_repeats,
        'batch_size': batch_size,
        'nb_epochs': nb_epochs,
        'my_patience': my_patience,
        'drop_rate': drop_rate,
        'my_optimizer': my_optimizer
    }

    path_root = sys.path[0]

    name_save = path_root + '/results/' + my_date_time

    print('========== loading samples ==========')

    samples = np.load("sample_all_Train10000_200.npy")
    entry_model = Entry()
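    # Sample tensor layout along axis 1 (inferred from the slicing below):
    # 0: normalized time (the last node stores tf), 1: radius r,
    # 2: downrange angle theta, 3: velocity v, 4: flight path angle gamma,
    # -1: angle of attack alpha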
    tf = samples[:, 0, -1]
    altitude = (samples[:, 1, 0] - entry_model.constant['R0']) / 1000
    downrange = samples[:, 2, -1] * entry_model.constant['R0'] / 1000 - 100
    velocity = samples[:, 3, 0]
    gamma = samples[:, 4, 0] * 180 / np.pi

    print('Tf range: max: %.2f, min: %.2f [s]' % (np.amax(tf), np.amin(tf)))
    print('Downrange range: max: %.2f, min: %.2f [km]' %
          (np.amax(downrange), np.amin(downrange)))
    print('Initial Altitude range: max: %.2f, min: %.2f [km]' %
          (np.amax(altitude), np.amin(altitude)))
    print('Initial Velocity range: max: %d, min: %d [m/s]' %
          (np.amax(velocity), np.amin(velocity)))
    print('Initial Flight Path Angle range: max: %.2f, min: %.2f [deg]' %
          (np.amax(gamma), np.amin(gamma)))

    alpha = samples[:, -1, :]

    ys = alpha.flatten()  # one label per (trajectory, node) pair, row-major

    r = (samples[:, 1, :].flatten() -
         entry_model.constant['R0']) / entry_model.constant['h0']
    theta = samples[:, 2, :].flatten()
    v = samples[:, 3, :].flatten() / entry_model.constant['v0']
    g = samples[:, 4, :].flatten()

    tensors = np.column_stack((r, theta, v, g))
    tensors = tensors.astype(np.float32)

    print('input shape:', tensors.shape)

    print('========== shuffling data ==========')

    shuffled_idxs = random.sample(range(tensors.shape[0]),
                                  tensors.shape[0])  # a permutation: sampling w/o replacement
    tensors = tensors[shuffled_idxs]
    ys = ys[shuffled_idxs]

    print('========== conducting', n_folds, 'fold cross validation ==========')
    print('repeating each fold:', n_repeats, 'times')

    folds = np.array_split(tensors, n_folds, axis=0)

    print('fold sizes:', [fold.shape[0] for fold in folds])

    folds_labels = np.array_split(ys, n_folds, axis=0)

    outputs = []
    histories = []

    for i in range(n_folds):

        t = time.time()

        x_train = np.concatenate(
            [fold for j, fold in enumerate(folds) if j != i], axis=0)
        x_test = folds[i]

        y_train = np.concatenate(
            [y for j, y in enumerate(folds_labels) if j != i], axis=0)
        y_test = folds_labels[i]

        for repeating in range(n_repeats):

            print('clearing Keras session')
            K.clear_session()

            my_input = Input(shape=(4, ), dtype='float32')

            dense_1 = Dense(dense_units,
                            activation=activation_hidden)(my_input)

            dense_2 = Dense(dense_units, activation=activation_hidden)(dense_1)

            dense_3 = Dense(dense_units, activation=activation_hidden)(dense_2)

            dense_4 = Dense(dense_units, activation=activation_hidden)(dense_3)

            dense_5 = Dense(dense_units, activation=activation_hidden)(dense_4)

            dense_6 = Dense(dense_units, activation=activation_hidden)(dense_5)

            prob = Dense(1)(dense_6)
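            # single linear output unit: the network regresses alpha directly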

            # instantiate model
            model = Model(my_input, prob)

            # configure model for training
            model.compile(loss=my_loss_function,
                          optimizer=my_optimizer,
                          metrics=['mae', 'mse'])

            print('model compiled')

            # early_stopping = EarlyStopping(monitor='val_mean_absolute_error',  # stop once val MAE stops improving
            #                                patience=my_patience,
            #                                mode='min')  # MAE is an error: lower is better

            history = model.fit(
                x_train,
                y_train,
                batch_size=batch_size,
                epochs=nb_epochs,  # 'nb_epoch' is the deprecated Keras 1 spelling
                validation_data=(x_test, y_test),
            )
            # callbacks=[early_stopping])

            # save [val loss, val MAE] at the epoch with the best (lowest) val MAE
            best_mae = min(history.history['val_mean_absolute_error'])
            best_idx = history.history['val_mean_absolute_error'].index(
                best_mae)
            output = [history.history['val_loss'][best_idx], best_mae]
            outputs.append(output)

            # also save full history for sanity checking
            histories.append(history.history)

        print('**** fold', i + 1,
              'done in ' + str(math.ceil(time.time() - t)) + ' second(s) ****')

    # save results to disk
    with open(name_save + '_parameters.json', 'w') as my_file:
        json.dump(parameters, my_file, sort_keys=True, indent=4)

    print('========== parameters defined and saved to disk ==========')

    with open(name_save + '_results.json', 'w') as my_file:
        json.dump({
            'outputs': outputs,
            'histories': histories
        },
                  my_file,
                  sort_keys=False,
                  indent=4)

    print('========== results saved to disk ==========')
Code example #2
from TwoD_Entry_bvp import Entry
from keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt

entry_model = Entry()

samples = np.load("/work/yshi/two/sample_all_Train10000_200.npy")
r = samples[:,1,:].flatten()
v = samples[:,3,:].flatten()

def normalize(state, r, v):
    r0 = state[0]
    v0 = state[2]
    v0 = (v0 - np.mean(v)) / np.std(v)
    r0 = (r0 - np.mean(r)) / np.std(r)

    return np.array([r0, state[1], v0, state[-1]])


def anti_normalize(state, r, v):
    r0 = state[0]
    v0 = state[2]

    v0 = v0 * np.std(v) + np.mean(v)
    r0 = r0 * np.std(r) + np.mean(r)

    return np.array([r0, state[1], v0, state[-1]])

state = [entry_model.constant['r0'], 0, entry_model.constant['v0'],
         entry_model.constant['gamma0']]  # r, theta, v, gamma
next_state = normalize(state, r, v)
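
A minimal round-trip check for the two helpers above (hypothetical usage on the same data):

unnorm = anti_normalize(next_state, r, v)
assert np.allclose(unnorm, state)  # normalize followed by anti_normalize is lossless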
Code example #3
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 12:17:36 2019

@author: Yang Shi
"""

from TwoD_Entry_bvp import Entry
from keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt

entry_model = Entry()

samples = np.load("sample_all_Train10000_200.npy")
r = samples[:, 1, :].flatten()
v = samples[:, 3, :].flatten()


def normalize(state, r, v):
    r0 = state[0]
    v0 = state[2]
    v0 = (v0 - np.mean(v)) / np.std(v)
    r0 = (r0 - np.mean(r)) / np.std(r)

    return np.array([r0, state[1], v0, state[-1]])


def anti_normalize(state, r, v):
    # body restored from the identical helper in code example #2
    r0 = state[0]
    v0 = state[2]

    v0 = v0 * np.std(v) + np.mean(v)
    r0 = r0 * np.std(r) + np.mean(r)

    return np.array([r0, state[1], v0, state[-1]])
Code example #4
File: prediction_dot.py Project: YSHI17172/2D_Entry
from keras.models import load_model
import numpy as np
from keras import backend as K
import os
from TwoD_Entry_bvp import Entry

model_path = '/home/yshi/two/model/43/'

train = np.load("/work/yshi/two/sample_all_Train10000_200.npy")

samples = np.load('/work/yshi/two/sample_all_Train.npy')

entry_model = Entry()
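# Entry-dynamics constants pulled from the model (names inferred from standard
# entry-dynamics notation: hs = atmospheric scale height, R0 = planetary
# radius, rho0 = reference density, A = reference area, m = vehicle mass;
# b0, b2, c1 are presumably aerodynamic-coefficient parameters)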
hs = entry_model.constant['hs']
R0 = entry_model.constant['R0']
rho0 = entry_model.constant['rho0']
b0 = entry_model.constant['b0']
b2 = entry_model.constant['b2']
c1 = entry_model.constant['c1']
A = entry_model.constant['A']
m = entry_model.constant['m']
g0 = 9.81

alpha = samples[:, -1, :] * np.pi / 180  # angle of attack in radians

theta = samples[:, 2, :].flatten()
gamma = samples[:, 4, :].flatten()

r = samples[:, 1, :].flatten()
r_normalized = r / R0
v = samples[:, 3, :].flatten()
Code example #5
import numpy as np
import matplotlib.pyplot as plt
from TwoD_Entry_bvp import Entry

entry_model = Entry()

data = np.load(
    '/Users/User/Desktop/code/trajectory/Entry30_fold_1_repeating_2_0.npy')

control = data[:, -1]

trajectory = data[:, :-1]

step = 1e-4

time = [i * step for i in range(len(control))]

# indirect-method reference solution
sol = entry_model.get_optimize([14])  # 14 s: initial guess of tf for zero downrange

plt.figure(figsize=(14, 7))

plt.subplot(221)
plt.plot(trajectory[:, 1] * entry_model.constant['R0'] / 1000,
         (trajectory[:, 0] - entry_model.constant['R0']) / 1000,
         label='DNN-based Approach')
plt.plot(sol.y[1] * entry_model.constant['R0'] / 1000,
         (sol.y[0] - entry_model.constant['R0']) / 1000,
         label='Indirect Method')
plt.xlabel('Downrange [km]')
plt.ylabel('Altitude [km]')
Code example #6
# Imports needed by this excerpt (the original file header is not shown):
import datetime
import json
import math
import random
import sys
import time

import numpy as np
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam

from TwoD_Entry_bvp import Entry

# NOTE: dataset, n_folds, n_repeats, batch_size, nb_epochs, my_patience,
# drop_rate, my_optimizer, my_loss_function, dense_units, layer_numer,
# learning_rate and activation_hidden are module-level hyperparameters
# defined elsewhere in the original file (not shown in this excerpt).


def main():

    my_date_time = '_'.join(
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S").split())

    parameters = {
        'dataset': dataset,
        'n_folds': n_folds,
        'n_repeats': n_repeats,
        'batch_size': batch_size,
        'nb_epochs': nb_epochs,
        'my_patience': my_patience,
        'drop_rate': drop_rate,
        'my_optimizer': my_optimizer,
        'my_loss_function': my_loss_function,
        'dense_units': dense_units,
        'layer_numer': layer_numer,
        'learning_rate': learning_rate,
    }

    path_root = sys.path[0]

    name_save = path_root + '/results/' + dataset + '_augmentation_' + my_date_time

    print('========== loading samples ==========')

    samples = np.load("/work/yshi/two/sample_all_Train10000_200.npy")
    entry_model = Entry()
    tf = samples[:, 0, -1]
    altitude = (samples[:, 1, 0] - entry_model.constant['R0']) / 1000
    downrange = samples[:, 2, -1] * entry_model.constant['R0'] / 1000 - 100
    velocity = samples[:, 3, 0]
    gamma = samples[:, 4, 0] * 180 / np.pi

    print('Tf range: max: %.2f, min: %.2f [s]' % (np.amax(tf), np.amin(tf)))
    print('Downrange range: max: %.2f, min: %.2f [km]' %
          (np.amax(downrange), np.amin(downrange)))
    print('Initial Altitude range: max: %.2f, min: %.2f [km]' %
          (np.amax(altitude), np.amin(altitude)))
    print('Initial Velocity range: max: %d, min: %d [m/s]' %
          (np.amax(velocity), np.amin(velocity)))
    print('Initial Flight Path Angle range: max: %.2f, min: %.2f [deg]' %
          (np.amax(gamma), np.amin(gamma)))

    alpha = samples[:, -1, :] * np.pi / 180  # angle of attack in radians (unused below; this variant regresses time-to-go)

    # time-to-go along each trajectory: (1 - normalized time) scaled row-wise
    # by that trajectory's final time tf
    trajectory_time = 1 - samples[:, 0, :]
    trajectory_time[:, -1] = 0
    trajectory_time *= tf[:, None]

    ys = trajectory_time.flatten()  # one time-to-go label per (trajectory, node) pair

    #r = (samples[:,1,:].flatten() - entry_model.constant['R0'])/entry_model.constant['h0']
    theta = samples[:, 2, :].flatten()
    #v = samples[:,3,:].flatten()/ entry_model.constant['v0']
    g = samples[:, 4, :].flatten()

    r = samples[:, 1, :].flatten()
    r = (r - np.mean(r)) / np.std(r)

    v = samples[:, 3, :].flatten()
    v = (v - np.mean(v)) / np.std(v)
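
    # NOTE: the z-score statistics (mean/std of r and v) computed here must be
    # reused at inference time; cf. the normalize/anti_normalize helpers in
    # code examples #2-#4.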

    tensors = np.column_stack((r, theta, v, g))
    tensors = tensors.astype(np.float32)

    print('input shape:', tensors.shape)

    print('========== shuffling data ==========')

    shuffled_idxs = random.sample(range(tensors.shape[0]),
                                  tensors.shape[0])  # a permutation: sampling w/o replacement
    tensors = tensors[shuffled_idxs]
    ys = ys[shuffled_idxs]

    print('========== conducting', n_folds, 'fold cross validation ==========')
    print('repeating each fold:', n_repeats, 'times')

    folds = np.array_split(tensors, n_folds, axis=0)

    print('fold sizes:', [fold.shape[0] for fold in folds])

    folds_labels = np.array_split(ys, n_folds, axis=0)

    outputs = []
    histories = []

    for i in range(n_folds):

        t = time.time()

        x_train = np.concatenate(
            [fold for j, fold in enumerate(folds) if j != i], axis=0)
        x_test = folds[i]

        y_train = np.concatenate(
            [y for j, y in enumerate(folds_labels) if j != i], axis=0)
        y_test = folds_labels[i]

        for repeating in range(n_repeats):

            print('clearing Keras session')
            K.clear_session()

            # instantiate model
            model = Sequential()

            model.add(Dense(dense_units, input_dim=4))

            for _ in range(layer_numer):

                model.add(Dense(dense_units, activation=activation_hidden))

            model.add(Dense(1, activation='softplus'))
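            # softplus keeps the predicted time-to-go non-negative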

            # configure model for training
            model.compile(loss=my_loss_function,
                          optimizer=Adam(lr=learning_rate),
                          metrics=['mse', 'mae'])

            print('model compiled')

            early_stopping = EarlyStopping(
                monitor='val_loss',  # stop once validation loss stops improving
                patience=my_patience)

            history = model.fit(x_train,
                                y_train,
                                batch_size=batch_size,
                                epochs=nb_epochs,  # 'nb_epoch' is the deprecated Keras 1 spelling
                                validation_data=(x_test, y_test))
            # callbacks=[early_stopping])

            # save [val loss, val MAE] at the epoch with the best (lowest) val loss
            best_loss = min(history.history['val_loss'])
            best_idx = history.history['val_loss'].index(best_loss)
            output = [best_loss,
                      history.history['val_mean_absolute_error'][best_idx]]
            outputs.append(output)

            ## save model
            #save_name = dataset + '_fold_%d_repeating_%d_'%(i,repeating) + my_date_time
            #model.save('model/%s.h5'%save_name)

            # also save full history for sanity checking
            histories.append(history.history)

        print('**** fold', i + 1,
              'done in ' + str(math.ceil(time.time() - t)) + ' second(s) ****')

    # save results to disk
    with open(name_save + '_parameters.json', 'w') as my_file:
        json.dump(parameters, my_file, sort_keys=True, indent=4)

    print('========== parameters defined and saved to disk ==========')

    with open(name_save + '_results.json', 'w') as my_file:
        json.dump({
            'outputs': outputs,
            'histories': histories
        },
                  my_file,
                  sort_keys=False,
                  indent=4)

    print('========== results saved to disk ==========')
Code example #7
from TwoD_Entry_bvp import Entry
from keras.models import load_model
import numpy as np

entry_model = Entry()

hs = entry_model.constant['hs']
R0 = entry_model.constant['R0']
rho0 = entry_model.constant['rho0']
b0 = entry_model.constant['b0']
b2 = entry_model.constant['b2']
c1 = entry_model.constant['c1']
A = entry_model.constant['A']
m = entry_model.constant['m']
g0 = 9.81


def anti_normalize(state):
    r0 = state[0]
    v0 = state[2]

    v0 = v0 * np.sqrt(R0 * g0)
    r0 = r0 * R0

    return np.array([r0, state[1], v0, state[-1]])


r = entry_model.constant['r0']
v = entry_model.constant['v0']
theta = 0
gamma = entry_model.constant['gamma0']
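
For reference, a forward counterpart of anti_normalize above, as a minimal sketch under the same nondimensionalization (radius scaled by R0, velocity by sqrt(R0 * g0)); the helper name and usage line are assumptions, not part of the project:


def normalize(state):
    # nondimensionalize radius and velocity; theta and gamma are already
    # dimensionless angles
    return np.array([state[0] / R0, state[1],
                     state[2] / np.sqrt(R0 * g0), state[-1]])


state = normalize(np.array([r, theta, v, gamma]))  # hypothetical usage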
Code example #8
File: main.py Project: YSHI17172/2D_Entry
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 15:12:50 2019

@author: Yang Shi
"""

import numpy as np
from TwoD_Entry_bvp import Entry

model = Entry(random=True)

print(
    '------------------Random Starting Optimization--------------------------')
optimization_num = 0
op_round_max = 10000  # target number of successful optimizations
mesh_size = 200
sample_all = []
while len(sample_all) < op_round_max:
    model.reset()
    for jjj in range(100):
        tf = np.random.randint(12, 18)
        # print(tf)
        result = model.get_optimize([tf])
        optimization_num += 1
        if result:
            print('random Starting--', 'round:', len(sample_all), 'step', jjj)
            sample = model.get_sample(result, mesh_size)

            # handle redundant nodes