def main(_):
    # create a shared session between Keras and Tensorflow
    policy_sess = tf.Session()
    K.set_session(policy_sess)

    NUM_LAYERS = 3  # number of layers of the state space
    MAX_TRIALS = 250  # maximum number of models generated

    MAX_EPOCHS = 60  # maximum number of epochs to train
    BATCHSIZE = 100  # batchsize
    EXPLORATION = 0.5  # high exploration for the first 1000 steps
    REGULARIZATION = 1e-3  # regularization strength
    CONTROLLER_CELLS = 32  # number of cells in RNN controller
    CLIP_REWARDS = False  # if True, clip rewards to the [-0.05, 0.05] range
    RESTORE_CONTROLLER = True  # restore controller to continue training

    # construct a state space
    state_space = StateSpace()

    # add states
    #state_space.add_state(name='kernel', values=[3])
    state_space.add_state(name='filters', values=[30, 60, 100, 144])
    #state_space.add_state(name='stride', values=[1])

    # print the state space being searched
    state_space.print_state_space()

    previous_acc = 0.0
    total_reward = 0.0

    with policy_sess.as_default():
        # create the Controller and build the internal policy network
        controller = Controller(policy_sess,
                                NUM_LAYERS,
                                state_space,
                                reg_param=REGULARIZATION,
                                exploration=EXPLORATION,
                                controller_cells=CONTROLLER_CELLS,
                                restore_controller=RESTORE_CONTROLLER)
    print('done')
    # create the Network Manager
    manager = NetworkManager(FLAGS, clip_rewards=CLIP_REWARDS)

    # get an initial random state space if controller needs to predict an
    # action from the initial state
    state = state_space.get_random_state_space(NUM_LAYERS)
    print("Initial Random State : ", state_space.parse_state_space_list(state))
    #print()

    # train for the given number of trials
    for trial in range(MAX_TRIALS):
        with policy_sess.as_default():
            actions = controller.get_action(state)  # get an action for the previous state

        # print the action probabilities
        state_space.print_actions(actions)
        print("Predicted actions : ",
              state_space.parse_state_space_list(actions))

        # build a model, train and get reward and accuracy from the network manager
        reward, previous_acc = manager.get_rewards(
            model_fn_cnn, state_space.parse_state_space_list(actions))
        print("Rewards : ", reward, "Accuracy : ", previous_acc)

        with policy_sess.as_default():

            total_reward += reward
            print("Total reward : ", total_reward)

            # actions and states are equivalent, save the state and reward
            state = actions
            controller.store_rollout(state, reward)

            # train the controller on the saved state and the discounted rewards
            loss = controller.train_step()
            print("Trial %d: Controller loss : %0.6f" % (trial + 1, loss))

            # write the results of this trial into a file
            with open('train_history.csv', mode='a+') as f:
                data = [previous_acc, reward]
                data.extend(state_space.parse_state_space_list(state))
                writer = csv.writer(f)
                writer.writerow(data)
        print()

    print("Total Reward : ", total_reward)
Example 2
MAX_EPOCHS = 10  # maximum number of epochs to train
CHILD_BATCHSIZE = 128  # batchsize of the child models
EXPLORATION = 0.8  # high exploration for the first 1000 steps
REGULARIZATION = 1e-3  # regularization strength
CONTROLLER_CELLS = 32  # number of cells in RNN controller
EMBEDDING_DIM = 20  # dimension of the embeddings for each state
ACCURACY_BETA = 0.8  # beta value for the moving average of the accuracy
CLIP_REWARDS = 0.0  # clip rewards to the [-0.05, 0.05] range (0.0 disables clipping)
RESTORE_CONTROLLER = True  # restore controller to continue training
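# ACCURACY_BETA and CLIP_REWARDS are typically consumed inside the NetworkManager
# rather than here. A hedged sketch of the usual scheme (example_reward is not
# part of this project): the reward is the child model's accuracy minus an
# exponential moving average of past accuracies, optionally clipped.
def example_reward(acc, moving_acc, beta=ACCURACY_BETA, clip=CLIP_REWARDS):
    reward = acc - moving_acc                             # advantage over the running baseline
    if clip:
        reward = min(max(reward, -0.05), 0.05)            # keep the reward in [-0.05, 0.05]
    moving_acc = beta * moving_acc + (1.0 - beta) * acc   # update the baseline
    return reward, moving_acc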

# construct a state space
state_space = StateSpace()

# add states
state_space.add_state(name='kernel', values=[1, 3])
state_space.add_state(name='filters', values=[16, 32, 64])

# print the state space being searched
state_space.print_state_space()

# prepare the training data for the NetworkManager
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

dataset = [x_train, y_train, x_test,
           y_test]  # pack the dataset for the NetworkManager
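The packed dataset and the constants above are typically handed to the NetworkManager next; a minimal sketch, assuming a constructor of this shape (check this project's NetworkManager definition for the exact keywords):

manager = NetworkManager(dataset, epochs=MAX_EPOCHS,
                         child_batchsize=CHILD_BATCHSIZE,
                         acc_beta=ACCURACY_BETA,
                         clip_rewards=CLIP_REWARDS)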
Example 3
MAX_EPOCHS = 10  # maximum number of epochs to train
CHILD_BATCHSIZE = 128  # batchsize of the child models
EXPLORATION = 0.8  # high exploration for the first 1000 steps
REGULARIZATION = 1e-3  # regularization strength
CONTROLLER_CELLS = 32  # number of cells in RNN controller
EMBEDDING_DIM = 20  # dimension of the embeddings for each state
ACCURACY_BETA = 0.8  # beta value for the moving average of the accuracy
CLIP_REWARDS = 0.0  # clip rewards to the [-0.05, 0.05] range (0.0 disables clipping)
RESTORE_CONTROLLER = True  # restore controller to continue training

# construct a state space
state_space = StateSpace()

# add states
state_space.add_state(name='aggType', values=[0, 1, 2, 3, 4])
state_space.add_state(name='activation',  # activation function per layer
                      values=[
                          "sigmoid", "tanh", "relu", "linear", "softplus",
                          "leaky_relu", "relu6"
                      ])

# print the state space being searched
state_space.print_state_space()

previous_acc = 0.0
total_reward = 0.0

# create the Controller and build the internal policy network
with policy_sess.as_default():
    controller = Controller(policy_sess,
Example 4
# state_space.add_state(name='scene-intersection', values=[0, 1])
# state_space.add_state(name='scene-construction', values=[0, 1])
# state_space.add_state(name='scene-rail', values=[0, 1])
# state_space.add_state(name='scene-toll', values=[0, 1])
# state_space.add_state(name='scene-viaduct', values=[0, 1])
# state_space.add_state(name='car', values=[0, 4, 8, 12, 16, 20, 24, 28, 32, 36])
# state_space.add_state(name='motor', values=[0, 2, 4, 6])
# state_space.add_state(name='person', values=[0, 5, 10, 15, 20, 25])
# state_space.add_state(name='truck', values=[0, 4, 8, 12])
# state_space.add_state(name='tricycle', values=[0, 2, 4, 6])
# state_space.add_state(name='bus', values=[0, 2, 5, 7])
# state_space.add_state(name='truncation', values=[0, 3, 6, 9, 12, 15, 18])
# state_space.add_state(name='occlusion', values=[0, 9, 19, 28, 38, 47,  57])
# [4, 2, 5, 1, 1, 1, 1, 1, 1, 36, 6, 25, 12, 6, 7, 18, 57]

state_space.add_state(name='vehicle',
                      values=[round(4.1 * x, 4) for x in range(0, 11)])
state_space.add_state(name='person',
                      values=[round(1.3 * x, 4) for x in range(0, 11)])
state_space.add_state(name='non-motor',
                      values=[round(1.2 * x, 4) for x in range(0, 11)])
state_space.add_state(name='group',
                      values=[round(2.0 * x, 4) for x in range(0, 11)])

state_space.add_state(name='scene1',
                      values=[round(1.0 * x, 4) for x in range(-50, 51, 2)])
state_space.add_state(name='scene2',
                      values=[round(1.0 * x, 4) for x in range(-40, 51, 2)])
state_space.add_state(name='scene3',
                      values=[round(1.0 * x, 4) for x in range(-32, 54, 2)])
state_space.add_state(name='scene4',
                      values=[round(1.0 * x, 4) for x in range(-40, 41, 2)])
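# Each state above enumerates a small grid of candidate values: 'vehicle' takes
# the 11 values 0.0, 4.1, ..., 41.0, and each 'scene*' state steps through its
# range in increments of 2.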
NUM_LAYERS = 4  # number of layers of the state space
MAX_TRIALS = 250  # maximum number of models generated

MAX_EPOCHS = 10  # maximum number of epochs to train
BATCHSIZE = 128  # batchsize
EXPLORATION = 0.8  # high exploration for the first 1000 steps
REGULARIZATION = 1e-3  # regularization strength
CONTROLLER_CELLS = 32  # number of cells in RNN controller
CLIP_REWARDS = False  # if True, clip rewards to the [-0.05, 0.05] range
RESTORE_CONTROLLER = True  # restore controller to continue training

# construct a state space
state_space = StateSpace()

# add states
state_space.add_state(name='kernel', values=[1, 3])
state_space.add_state(name='filters', values=[16, 32, 64])

# print the state space being searched
state_space.print_state_space()

# prepare the training data for the NetworkManager
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

dataset = [x_train, y_train, x_test, y_test]  # pack the dataset for the NetworkManager

previous_acc = 0.0
Example 6
schedule_in = 2
S1 = 1
OPT_TIMEPERFORMANCE = 200000

DSP_RESOURCE = 220
OPT = 1

#G=nx.DiGraph()
#DESIGN_PARA=DESIGN_PARA(IMG_SIZE,BITWIDTH,IMG_CHANNEL,DSP_RESOURCE)
#SCHEDULE=SCHEDULE(schedule_in,S1,G,OPT,NUM_LAYERS)

# construct a state space
state_space = StateSpace()

# add states
state_space.add_state(name='kernel', values=[5, 7, 14])
state_space.add_state(name='filters', values=[9, 18, 36])

#state_space.add_state(name='kernel', values=[1,3])
#state_space.add_state(name='filters', values=[8,16,32,64])
#state_space.add_state(name='filters', values=[4, 8, 16, 24])

# print the state space being searched
state_space.print_state_space()

# print(state_space[0],"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# print(state_space[1],"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")

# prepare the training data for the NetworkManager
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float16') / 255.
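A hedged sketch of how the MNIST preparation commonly continues for a convolutional child model, mirroring the CIFAR-10 handling in the earlier examples (the names and shapes below are assumptions, not this project's actual code):

x_train = x_train.reshape(-1, 28, 28, 1)                        # add the channel axis
x_test = x_test.reshape(-1, 28, 28, 1).astype('float16') / 255.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
dataset = [x_train, y_train, x_test, y_test]                    # pack for the NetworkManager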
Example 7
MAX_EPOCHS = 1  # maximum number of epochs to train (reduced from 10 by xtpan)
CHILD_BATCHSIZE = 512  # batchsize of the child models
EXPLORATION = 0.7  # high exploration for the first 1000 steps
REGULARIZATION = 1e-3  # regularization strength
CONTROLLER_CELLS = 32  # number of cells in RNN controller
EMBEDDING_DIM = 20  # dimension of the embeddings for each state
ACCURACY_BETA = 0.8  # beta value for the moving average of the accuracy
CLIP_REWARDS = 0.0  # clip rewards to the [-0.05, 0.05] range (0.0 disables clipping)
RESTORE_CONTROLLER = True  # restore controller to continue training
#TOP_K_CANDIDATE_ACTION = 5

# construct a state space
state_space = StateSpace()

# add states
state_space.add_state(name='embedding', values=[50, 100, 200])
state_space.add_state(name='bidirection_lstm', values=[64, 128, 256])
state_space.add_state(name='filters', values=[16, 32, 64])
state_space.add_state(name='kernel', values=[1, 3])

# print the state space being searched
state_space.print_state_space()

x_train = []
y_train = []
x_test = []
y_test = []
label_size = 0
with open('nlp/train.dat', 'r') as f:
    for line in f:
        elements = line.strip('\r\n').split('\t')
Example 8
RESTORE_CONTROLLER = False  # restore controller to continue training

MAX_SEQ_LENGTH = 30

MODEL_NAME = "textcnn"

# init data_helper
my_dh = dh.MyHelper(MAX_SEQ_LENGTH)
my_dh.initialize()
x_train, y_train, x_test, y_test = my_dh.read_input("../data/all_data.txt")

# construct a state space
state_space = StateSpace()

# add states
state_space.add_state(name='embedding', values=[100, 200, 300])
state_space.add_state(name='bidirection_lstm', values=[64, 128, 256])
#state_space.add_state(name='filters', values=[32, 64, 128, 256])	# Mi
state_space.add_state(name='filters', values=[16, 32, 64])  # Fawcar
state_space.add_state(name='kernel_height', values=[2, 3, 4, 5])
state_space.add_state(name='pool_weight', values=[2, 3, 4, 5])
#state_space.add_state(name='fc_size', values=[256, 512, 1024, 2048])	# Mi
state_space.add_state(name='fc_size', values=[256, 512])  # Fawcar
state_space.add_state(name="vocab_size", values=[my_dh.get_vocab_size()])
state_space.add_state(name="max_seq_length", values=[MAX_SEQ_LENGTH])
state_space.add_state(name="label_num", values=[len(my_dh.label2id.keys())])
# define model type; lstm / bilstm / lstm+bilstm / lenet
state_space.add_state(name="model_type", values=[MODEL_NAME])

# print the state space being searched
state_space.print_state_space()