def worker_GS(input_worker):
    """Evaluate one Gaussian-perturbed candidate whose weight matrices use
    a Toeplitz parameterisation, over a single 1000-step episode.

    input_worker: (seed, epsilon, p) — env seed, flat noise vector, and
    the current Toeplitz generator vectors for W1/W2/W3.
    Returns (reward, epsilon_W1, epsilon_W2, epsilon_W3).
    """
    # Global variables:

    global numInput, numOutput, numHidden1, numHidden2
    global dim_hidden2_output, dim_input_hidden1, dim_hidden1_hidden2
    global env
    global sigma
    global VBN_dict

    # Local unpacking of the worker payload:
    seed = int(input_worker[0])
    epsilon = input_worker[1]
    p = input_worker[2]

    env.seed(seed)

    # Network with Toeplitz weights; VBN disabled for this variant.
    NN = NeuralNetwork(numInput, numHidden1, numHidden2, numOutput, VBN_dict)
    NN.use_VBN = False

    # toeplitz(col, row): tail of each generator is the first column,
    # head is the first row.
    NN.W1 = toeplitz(p[0][numInput:], p[0][:numInput])
    NN.W2 = toeplitz(p[1][numHidden1:], p[1][:numHidden1])
    NN.W3 = toeplitz(p[2][numHidden2:], p[2][:numHidden2])

    # Carve the flat noise vector into one generator-sized chunk per layer.
    len1 = numHidden1 + numInput
    len2 = numHidden2 + numHidden1
    len3 = numOutput + numHidden2
    epsilon_W1 = epsilon[0:len1]
    epsilon_W2 = epsilon[len1:len1 + len2]
    epsilon_W3 = epsilon[len1 + len2:len1 + len2 + len3]

    # Perturb each weight matrix with Toeplitz-shaped noise.
    NN.W1 = NN.W1 + sigma * toeplitz(epsilon_W1[numInput:],
                                     epsilon_W1[:numInput])
    NN.W2 = NN.W2 + sigma * toeplitz(epsilon_W2[numHidden1:],
                                     epsilon_W2[:numHidden1])
    NN.W3 = NN.W3 + sigma * toeplitz(epsilon_W3[numHidden2:],
                                     epsilon_W3[:numHidden2])

    initial_observation = env.reset()

    reward_worker = episodeRoute(NN, env, initial_observation, steps=1000)

    return (reward_worker, epsilon_W1, epsilon_W2, epsilon_W3)
# Ejemplo n.º 2 (scraped example separator — commented out so the file parses)
# 0
def worker_GS(input_worker):
    """Evaluate one Gaussian-perturbed candidate with dense (non-Toeplitz)
    weight matrices over a single 250-step episode.

    input_worker: (seed, epsilon, p) — env seed, flat noise vector, and
    the current dense weight matrices p[0..2].
    Returns (reward, epsilon_W1, epsilon_W2, epsilon_W3).
    """
    # Global variables:

    global numInput, numOutput, numHidden1, numHidden2
    global dim_hidden2_output, dim_input_hidden1, dim_hidden1_hidden2
    global env
    global sigma
    global VBN_dict

    # Local unpacking of the worker payload:
    seed = int(input_worker[0])
    epsilon = input_worker[1]
    p = input_worker[2]

    env.seed(seed)

    # Network with dense weights; VBN disabled for this variant.
    NN = NeuralNetwork(numInput, numHidden1, numHidden2, numOutput, VBN_dict)
    NN.use_VBN = False
    NN.W1 = p[0]
    NN.W2 = p[1]
    NN.W3 = p[2]

    # Carve the flat noise vector into per-layer matrices
    # (ordered W3, W2, W1 in the flat vector).
    cut1 = dim_hidden2_output
    cut2 = cut1 + dim_hidden1_hidden2
    cut3 = cut2 + dim_input_hidden1
    epsilon_W3 = epsilon[:cut1].reshape((numOutput, numHidden2))
    epsilon_W2 = epsilon[cut1:cut2].reshape((numHidden2, numHidden1))
    epsilon_W1 = epsilon[cut2:cut3].reshape((numHidden1, numInput))

    # Gaussian perturbation of every weight matrix.
    NN.W1 = NN.W1 + epsilon_W1 * sigma
    NN.W2 = NN.W2 + epsilon_W2 * sigma
    NN.W3 = NN.W3 + epsilon_W3 * sigma

    initial_observation = env.reset()

    reward_worker = episodeRoute(NN, env, initial_observation, steps=250)

    return (reward_worker, epsilon_W1, epsilon_W2, epsilon_W3)
def worker(input_worker):
    """Run one perturbed-policy rollout with locally generated noise.

    input_worker: (seed, p) — seed drives both the environment and the
    numpy RNG (so the master can regenerate this worker's noise), and p
    holds the Toeplitz generator vectors of the three weight matrices.

    Returns (reward, epsilon_W1, epsilon_W2, epsilon_W3): the episode
    reward plus the per-layer noise matrices used for the perturbation.
    """
    #Global variables:
    global numInput, numOutput, numHidden1, numHidden2
    global dim_hidden2_output, dim_input_hidden1, dim_hidden1_hidden2
    global sigma
    global env

    #Local:

    seed = int(input_worker[0])
    p = input_worker[1]

    env.seed(seed)
    np.random.seed(seed)

    #Neural Networks:
    NN = NeuralNetwork(numInput, numHidden1, numHidden2, numOutput, VBN_dict)

    # toeplitz(col, row): tail of each generator vector gives the first
    # column, head gives the first row.
    NN.W1 = toeplitz(p[0][numInput:], p[0][:numInput])
    NN.W2 = toeplitz(p[1][numHidden1:], p[1][:numHidden1])
    NN.W3 = toeplitz(p[2][numHidden2:], p[2][:numHidden2])

    #distortions: a single standard-normal draw covering all three layers.
    total_dim = dim_hidden2_output + dim_input_hidden1 + dim_hidden1_hidden2
    epsilon = np.random.multivariate_normal(np.zeros(total_dim),
                                            np.identity(total_dim))
    epsilon_W3 = epsilon[0:dim_hidden2_output].reshape((numOutput, numHidden2))
    epsilon_W2 = epsilon[dim_hidden2_output:dim_hidden2_output +
                         dim_hidden1_hidden2].reshape((numHidden2, numHidden1))
    epsilon_W1 = epsilon[dim_hidden2_output +
                         dim_hidden1_hidden2:dim_hidden2_output +
                         dim_hidden1_hidden2 + dim_input_hidden1].reshape(
                             (numHidden1, numInput))

    #parameters update
    NN.W1 = NN.W1 + epsilon_W1 * sigma
    NN.W2 = NN.W2 + epsilon_W2 * sigma
    NN.W3 = NN.W3 + epsilon_W3 * sigma

    # BUG FIX: env.reset() was commented out, leaving `initial_observation`
    # undefined and raising NameError at the episodeRoute call below.
    initial_observation = env.reset()

    reward_worker = episodeRoute(NN, env, initial_observation, steps=250)

    return (reward_worker, epsilon_W1, epsilon_W2, epsilon_W3)
# Ejemplo n.º 4 (scraped example separator — commented out so the file parses)
# 0
def worker(input_worker):
    """Run one perturbed-policy rollout for the two-layer (wi/wo) network.

    input_worker: (seed, p) — seed drives both the environment and the
    numpy RNG, p = (wi, wo) dense weight matrices.

    Returns (reward, epsilon_wi, epsilon_wo).
    """
    #Global variables:
    global numInput, numOutput, numHidden
    # BUG FIX: `dim_hidden_output` was declared twice here; the second name
    # was clearly meant to be `dim_input_hidden` (read below).
    global dim_hidden_output, dim_input_hidden
    global sigma
    global env

    #Local:

    seed = int(input_worker[0])
    p = input_worker[1]

    env.seed(seed)
    np.random.seed(seed)

    #Neural Networks:
    NN = NeuralNetwork(numInput, numHidden, numOutput, VBN_dict)

    NN.wi = p[0]
    NN.wo = p[1]

    #distortions: one standard-normal draw split across both layers.
    total_dim = dim_hidden_output + dim_input_hidden
    epsilon = np.random.multivariate_normal(np.zeros(total_dim),
                                            np.identity(total_dim))
    epsilon_wo = epsilon[0:dim_hidden_output].reshape((numHidden, numOutput))
    epsilon_wi = epsilon[dim_hidden_output:dim_hidden_output +
                         dim_input_hidden].reshape((numInput, numHidden))

    #parameters update (remark: the two could be merged and reshaped once)
    NN.wo = NN.wo + epsilon_wo * sigma
    NN.wi = NN.wi + epsilon_wi * sigma

    # BUG FIX: env.reset() was commented out, so `initial_observation` was
    # undefined (NameError) when passed to episodeRoute below.
    initial_observation = env.reset()

    reward_worker = episodeRoute(NN, env, initial_observation, steps=250)

    return (reward_worker, epsilon_wi, epsilon_wo)
def worker_train_VBN(input_worker_VBN):
    """Collect hidden-layer activation statistics for Virtual Batch Norm.

    Runs the (unperturbed) Toeplitz-parameterised network for up to 1000
    environment steps and accumulates the element-wise sum and sum of
    squares of the two hidden-layer activations Z1/Z2, from which the
    master can derive reference means and variances.

    input_worker_VBN: (seed, p) with p the Toeplitz generator vectors.
    Returns (sum_zh1, sum_zh2, sum_zh1_sq, sum_zh2_sq, num_step).
    """
    #Global variables:
    global numInput, numOutput, numHidden1, numHidden2
    global dim_hidden2_output, dim_input_hidden1, dim_hidden1_hidden2
    global env

    #Local:

    seed = int(input_worker_VBN[0])
    p = input_worker_VBN[1]

    env.seed(seed)

    # Identity-normalisation placeholders while the real stats are gathered.
    VBN_dict = {}
    VBN_dict['mu_h1'] = np.zeros((numHidden1, 1))
    VBN_dict['var_h1'] = np.ones((numHidden1, 1))
    VBN_dict['mu_h2'] = np.zeros((numHidden2, 1))
    # BUG FIX: was np.zeros — inconsistent with var_h1, and a zero variance
    # placeholder would divide by zero if the dict were ever used.
    VBN_dict['var_h2'] = np.ones((numHidden2, 1))

    #Neural Networks:
    NN = NeuralNetwork(numInput, numHidden1, numHidden2, numOutput, VBN_dict)

    NN.W1 = toeplitz(p[0][numInput:], p[0][:numInput])
    NN.W2 = toeplitz(p[1][numHidden1:], p[1][:numHidden1])
    NN.W3 = toeplitz(p[2][numHidden2:], p[2][:numHidden2])

    # Accumulators live OUTSIDE the step loop so they sum over the episode.
    sum_zh1 = [0.] * numHidden1
    sum_zh2 = [0.] * numHidden2
    sum_zh1_sq = [0.] * numHidden1
    sum_zh2_sq = [0.] * numHidden2

    steps = 1000
    Ai = env.reset()
    num_step = steps
    # Plain feed-forward: VBN must stay off while its statistics are gathered.
    NN.use_VBN = False

    for j in range(steps):

        Ao = NN.feedForward(Ai)

        sum_zh1 = [sum(x) for x in zip(sum_zh1, NN.Z1)]
        sum_zh2 = [sum(x) for x in zip(sum_zh2, NN.Z2)]

        sum_zh1_sq = [sum(x) for x in zip(sum_zh1_sq, square(NN.Z1))]
        sum_zh2_sq = [sum(x) for x in zip(sum_zh2_sq, square(NN.Z2))]

        action = np.argmax(Ao)
        Ai, reward, done, info = env.step(action)

        if done:
            # BUG FIX: `num_step = j` was placed after `break` and therefore
            # unreachable; record the actual episode length before leaving.
            num_step = j
            break

    return (sum_zh1, sum_zh2, sum_zh1_sq, sum_zh2_sq, num_step)
# Ejemplo n.º 6 (scraped example separator — commented out so the file parses)
# 0
def worker_GS(input_worker):
    """Evaluate one Gaussian-perturbed candidate with Toeplitz weights AND
    VBN scale/shift parameters, over a single 1000-step episode.

    input_worker: (seed, epsilon, p) where
        seed    -- int, environment seed for this rollout
        epsilon -- flat noise vector: W1/W2/W3 generator chunks, then
                   beta1, gamma1, beta2, gamma2
        p       -- current parameters: Toeplitz generators p[0..2], then
                   gamma1, beta1, gamma2, beta2 in p[3..6]

    Returns the episode reward followed by every noise slice, presumably so
    the caller can reassemble the update direction — confirm against master.
    """
    #Global variables:

    global numInput, numOutput, numHidden1, numHidden2
    global dim_hidden2_output, dim_input_hidden1, dim_hidden1_hidden2
    global env
    global sigma
    global VBN_dict

    #Local:

    seed = int(input_worker[0])
    epsilon = input_worker[1]
    p = input_worker[2]

    env.seed(seed)
    #np.random.seed(seed)

    #Neural Networks:
    NN = NeuralNetwork(numInput, numHidden1, numHidden2, numOutput, VBN_dict)

    # toeplitz(col, row): tail of each generator vector is the first column,
    # head is the first row.
    NN.W1 = toeplitz(p[0][numInput:], p[0][:numInput])
    NN.W2 = toeplitz(p[1][numHidden1:], p[1][:numHidden1])
    NN.W3 = toeplitz(p[2][numHidden2:], p[2][:numHidden2])

    NN.use_VBN = True

    # VBN scale (gamma) and shift (beta) parameters per hidden layer.
    NN.gamma1 = p[3]
    NN.beta1 = p[4]
    NN.gamma2 = p[5]
    NN.beta2 = p[6]

    #distortions

    # Slice the flat noise vector: one Toeplitz-generator-sized chunk per
    # weight matrix (lengths n_rows + n_cols each).
    epsilon_W1 = epsilon[0:numHidden1 + numInput]
    epsilon_W2 = epsilon[numHidden1 + numInput:numHidden1 + numInput +
                         numHidden2 + numHidden1]
    epsilon_W3 = epsilon[numHidden1 + numInput + numHidden2 +
                         numHidden1:numHidden1 + numInput + numHidden2 +
                         numHidden1 + numOutput + numHidden2]

    # Total length consumed by the three weight chunks above.
    dim_temp = numHidden1 + numInput + numHidden2 + numHidden1 + numOutput + numHidden2

    # The remainder of the noise vector covers the VBN parameters, in order
    # beta1, gamma1, beta2, gamma2, reshaped to column vectors.
    epsilon_beta1 = epsilon[dim_temp:dim_temp + numHidden1].reshape(
        (numHidden1, 1))
    epsilon_gamma1 = epsilon[dim_temp + numHidden1:dim_temp +
                             2 * numHidden1].reshape((numHidden1, 1))
    epsilon_beta2 = epsilon[dim_temp + 2 * numHidden1:dim_temp +
                            2 * numHidden1 + numHidden2].reshape(
                                (numHidden2, 1))
    epsilon_gamma2 = epsilon[dim_temp + 2 * numHidden1 + numHidden2:dim_temp +
                             2 * numHidden1 + 2 * numHidden2].reshape(
                                 (numHidden2, 1))

    #parameters update: perturb weights with Toeplitz-shaped noise …
    NN.W1 = NN.W1 + sigma * toeplitz(epsilon_W1[numInput:],
                                     epsilon_W1[:numInput])
    NN.W2 = NN.W2 + sigma * toeplitz(epsilon_W2[numHidden1:],
                                     epsilon_W2[:numHidden1])
    NN.W3 = NN.W3 + sigma * toeplitz(epsilon_W3[numHidden2:],
                                     epsilon_W3[:numHidden2])

    # … and the VBN parameters with their scalar-scaled noise columns.
    NN.beta1 = NN.beta1 + epsilon_beta1 * sigma
    NN.gamma1 = NN.gamma1 + epsilon_gamma1 * sigma
    NN.beta2 = NN.beta2 + epsilon_beta2 * sigma
    NN.gamma2 = NN.gamma2 + epsilon_gamma2 * sigma

    initial_observation = env.reset()

    reward_worker = episodeRoute(NN, env, initial_observation, steps=1000)

    return (reward_worker, epsilon_W1, epsilon_W2, epsilon_W3, epsilon_gamma1,
            epsilon_beta1, epsilon_gamma2, epsilon_beta2)
# Ejemplo n.º 7 (scraped example separator — commented out so the file parses)
# 0
def worker_train_VBN(input_worker_VBN):
    """Collect activation statistics (sums and sums of squares) for VBN on
    the two-layer (wi/wo) network.

    Runs the unperturbed network for up to 250 environment steps and
    accumulates element-wise sums of the input/hidden/output activations
    zi/zh/zo and of their squares.

    input_worker_VBN: (seed, p) with p = (wi, wo).
    Returns (sum_zi, sum_zh, sum_zo, sum_zi2, sum_zh2, sum_zo2, num_step).
    """
    #Global variables:
    global numInput, numOutput, numHidden
    # BUG FIX: `dim_hidden_output` was declared twice; keep a single
    # declaration (the duplicate was redundant).
    global dim_hidden_output
    global env

    #Local:

    seed = int(input_worker_VBN[0])
    p = input_worker_VBN[1]

    env.seed(seed)

    # Placeholder (zeroed) VBN statistics while the real ones are gathered.
    VBN_dict = {}
    VBN_dict['mu_i'] = 0
    VBN_dict['var_i'] = 0
    VBN_dict['mu_h'] = 0
    VBN_dict['var_h'] = 0
    VBN_dict['mu_o'] = 0
    VBN_dict['var_o'] = 0

    #Neural Networks:
    NN = NeuralNetwork(numInput, numHidden, numOutput, VBN_dict)
    NN.wi = p[0]
    NN.wo = p[1]

    # BUG FIX: the accumulators were re-initialised INSIDE the step loop, so
    # every iteration discarded the previous sums (hence the old "ALWAYS
    # EQUAL TO 1" remark). Initialise them once, before the episode.
    sum_zi = [0.] * numInput
    sum_zh = [0.] * numHidden
    sum_zo = [0.] * numOutput
    sum_zi2 = [0.] * numInput
    sum_zh2 = [0.] * numHidden
    sum_zo2 = [0.] * numOutput

    steps = 250
    ai = env.reset()
    num_step = steps

    for j in range(steps):

        ao = NN.feedForward(ai)

        # Accumulate activations and their squares element-wise.
        sum_zi = [sum(x) for x in zip(sum_zi, NN.zi)]
        sum_zh = [sum(x) for x in zip(sum_zh, NN.zh)]
        sum_zo = [sum(x) for x in zip(sum_zo, NN.zo)]

        sum_zi2 = [sum(x) for x in zip(sum_zi2, square(NN.zi))]
        sum_zh2 = [sum(x) for x in zip(sum_zh2, square(NN.zh))]
        sum_zo2 = [sum(x) for x in zip(sum_zo2, square(NN.zo))]

        # Greedy action from the network output.
        action = np.argmax(ao)
        ai, reward, done, info = env.step(action)

        if done:
            # BUG FIX: `num_step = j` was placed after `break` and never
            # executed; record the episode length before leaving the loop.
            num_step = j
            break

    return (sum_zi, sum_zh, sum_zo, sum_zi2, sum_zh2, sum_zo2, num_step)