Example #1
  def __init__(self, start=0, end=1, frequency=50, duty_cycle=0.4, scaling=1, non_linearity=-1, shape_="monophasic"):
      self.start = start
      self.end = end
    
      self.lit_data = DataLoader()
            
      self.a = Activation(frequency, duty_cycle, scaling, non_linearity)
      self.a.get_activation_signal(self.lit_data.activation_function(), shape=shape_)

      self.a_sol = Activation(frequency, duty_cycle, scaling, non_linearity)
      self.a_sol.get_activation_signal(self.lit_data.activation_function_soleus(), shape=shape_)
     
      rest_length_soleus = self.soleus_length(23.7*np.pi/180)*1.015
      rest_length_tibialis = self.tibialis_length(-37.4*np.pi/180)*0.9158 # lower is earlier activation
      print(rest_length_soleus)
      print(rest_length_tibialis)
      soleus_f0m = 2600.06
      self.soleus = HillTypeMuscle(soleus_f0m, .1342*rest_length_soleus, .8658*rest_length_soleus)
      self.tibialis = HillTypeMuscle(605.3465, .2206*rest_length_tibialis, .7794*rest_length_tibialis)

      # theta, velocity, initial CE length of soleus, initial CE length of TA
      self.initial_state = np.array([self.lit_data.ankle_angle(self.start)[0]*np.pi/180,
                                     self.lit_data.ankle_velocity(self.start)[0]*np.pi/180,
                                     0.827034,
                                     1.050905])
      print(self.initial_state)    
      self.time = None
      self.x1 = None
      self.x2 = None
      self.x3 = None
      self.x4 = None
Example #2
import numpy as np
import pytest


def test_activation_functions():
    # Ensure the correct values are calculated by the member functions
    # (`Activation` is assumed to be imported from the module under test)

    # We compare against sklearn and scipy reference functions
    from sklearn.neural_network._base import tanh, relu
    from scipy.special import expit as sigmoid

    N = 100

    act = Activation(function='sigmoid')
    x = np.random.uniform(-10.0, 10.0, size=(N, 1))
    assert act(x) == pytest.approx(sigmoid(x))

    act.set(function='tanh')
    x = np.random.uniform(-10.0, 10.0, size=(N, 1))
    assert act(x) == pytest.approx(tanh(x))

    act.set(function='relu')
    x = np.random.uniform(-10.0, 10.0, size=(N, 1))
    assert act(x) == pytest.approx(relu(x))

    alpha = 2.5082958
    act.set(function='leakyrelu', alpha=alpha)
    x = np.random.uniform(-10.0, 10.0, size=(N, 1))
    assert act(x) == pytest.approx((x >= 0.0) * x + (x < 0.0) * alpha * x)
Example #3
    def __init__(self, num_neurons, input_shape):
        print(
            'Adding Layer: input_shape: {}, number of neurons: {}, output_shape: {}'
            .format(input_shape, num_neurons, num_neurons))
        # Let's initialize the weights in interval [0,1) for respective synaptic inputs
        self.weights = np.random.uniform(low=0, high=1, size=input_shape)

        # Let's initialize the biases all with value '1' for every neuron in the current layer
        self.biases = np.ones(num_neurons)

        # Let's initialize the activation_potentials all with value '0' for every neuron in the current layer
        self.activation_potentials = np.zeros(num_neurons)

        # Outputs of this layer
        self.outputs = np.zeros(num_neurons)

        # Local Gradients of all the neurons in current layer
        self.local_gradients = np.zeros(num_neurons)

        # And finally the activation function, for non-linearity of outputs
        self.activation = Activation()

        # Inputs to this layer -> Outputs from previous layer
        self.previous_layers_outputs = []
        print('Added Layer ... ')
Example #4
import pytest


def test_activation_set():
    # Default values
    act = Activation()

    # Ensure that changing default values result in changed function calls
    act.set(function='tanh')
    assert act.function == act._tanh

    act.set(function='relu')
    assert act.function == act._relu

    act.set(function='leakyrelu')
    assert act.function == act._leakyrelu

    # Check that a wrong string raises the expected error
    with pytest.raises(ValueError):
        act.set(function='this_is_not_an_allowed_string')

    # Ensure alpha values are set correctly
    alpha = 0.867
    act.set(alpha=alpha)
    assert act.alpha == pytest.approx(alpha)
Example #5
    def __init__(self, method=Method.Sigmoid):
        self.weights = []  # Current weights
        self.old_weights = []  # Weights from the previous iteration
        self.output = 0.0  # Neuron output
        self.inputted_features = []  # Inputted features
        self.summed_signal = 0.0  # Summed signal (the summation of inputs)
        self.learning_rate = 0.8  # Learning rate
        self.activation = Activation()  # Activation function
Example #6
import pytest


def test_activation_init():
    # Ensure the setup is handled correctly when initializing an instance
    # of the activation class
    act = Activation()

    # Default values
    assert act.function == act._sigmoid
    assert act.alpha == pytest.approx(0.01)

    # String to correct function conversion
    act = Activation(function='tanh')
    assert act.function == act._tanh

    act = Activation(function='relu')
    assert act.function == act._relu

    act = Activation(function='leakyrelu')
    assert act.function == act._leakyrelu

    act = Activation(function='sigmoid')
    assert act.function == act._sigmoid

    # Check that a wrong string raises the expected error
    with pytest.raises(ValueError):
        Activation(function='this_is_not_an_allowed_string')

    # Check alpha value specification is handled correctly
    alpha = 0.867
    act = Activation(function='relu', alpha=alpha)
    assert act.alpha == pytest.approx(alpha)
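
Taken together, Examples #2, #4, and #6 pin down the Activation interface these tests expect. The following is a minimal sketch, reconstructed from the assertions alone, of a class that would pass them; it is an illustration, not the project's actual implementation.

import numpy as np


class Activation:
    """Callable activation with a switchable function (reconstructed sketch)."""

    def __init__(self, function='sigmoid', alpha=0.01):
        self.alpha = alpha
        self.set(function=function)

    def set(self, function=None, alpha=None):
        # Map the allowed strings to member functions; reject anything else
        if function is not None:
            functions = {'sigmoid': self._sigmoid, 'tanh': self._tanh,
                         'relu': self._relu, 'leakyrelu': self._leakyrelu}
            if function not in functions:
                raise ValueError("Unknown activation function: " + function)
            self.function = functions[function]
        if alpha is not None:
            self.alpha = alpha

    def __call__(self, x):
        return self.function(x)

    def _sigmoid(self, x):
        return 1.0 / (1.0 + np.exp(-x))

    def _tanh(self, x):
        return np.tanh(x)

    def _relu(self, x):
        return np.maximum(x, 0.0)

    def _leakyrelu(self, x):
        return np.where(x >= 0.0, x, self.alpha * x)
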
Example #7
    def __init__(self):
        self.tag = self.__class__.__name__
        self.samples = []  # All training samples (feature values)
        self.targets = []  # Target outputs of the samples
        self.weights = []  # Weights
        self.bias = 0.0  # Bias
        self.learning_rate = 1.0  # Learning rate
        self.max_iteration = 1  # Maximum number of iterations
        self.convergence = 0.001  # Convergence tolerance
        self.activation = Activation()
Example #8
    def __init__(self, has_recurrent=False):
        self.weights = []  # <number>
        self.recurrent_weights = []  # <number>
        self.bias = 0.0
        self.delta_value = 0.0  # Current delta value will be next delta value.
        self.has_recurrent = has_recurrent  # Has recurrent inputs? (hidden nets do, output nets don't.)
        self.activation = Activation()  # Get/Set of the activation function live here: net.activation.method.
        # A separate self.activation_method is provided for convenient access.
        self.output = NetOutput()
        self.timesteps = []  # <Timestep Object>
Example #9
    def __init__(self) -> None:
        super().__init__()
        # hyper params
        self.alpha: float = 1.0
        self.lambda_: float = 0.0
        self.c: float = 0.0
        self.gamma: float = 0.0
        # model params
        self.layers = []
        # engine params (set later during configuration)
        self.act: Activation = None
        self.reg: Regularization = None
        self.opt: Optimizer = None
Example #10
    def __init__(self,
                 architecture=[784, 100, 10],
                 activation='sigmoid',
                 learning_rate=0.1,
                 momentum=0.5,
                 weight_decay=1e-4,
                 dropout=0.5,
                 early_stopping=True,
                 seed=99):
        """
        Neural network model initializer.
        """

        # Attributes
        self.architecture = architecture
        self.activation = activation
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.dropout = dropout
        self.early_stopping = early_stopping
        self.seed = seed

        # Turn `activation` and `learning_rate` to class instances
        if not isinstance(self.activation, Activation):
            self.activation = Activation(self.activation)
        if not isinstance(self.learning_rate, LearningRate):
            self.learning_rate = LearningRate(self.learning_rate)

        # Initialize a list of layers
        self.layers = []
        for i, (n_in, n_out) in enumerate(
                zip(architecture[:-2], architecture[1:-1])):
            l = HiddenLayer('layer{}'.format(i), n_in, n_out, self.activation,
                            self.learning_rate, self.momentum,
                            self.weight_decay, self.dropout, self.seed + i)
            self.layers.append(l)
        # Output layer
        n_in, n_out = architecture[-2], architecture[-1]
        l = OutputLayer('output_layer', n_in, n_out, self.learning_rate,
                        self.momentum, self.weight_decay, self.dropout,
                        self.seed + i + 1)
        self.layers.append(l)

        # Training updates
        self.epoch = 0
        self.training_error = []
        self.validation_error = []
        self.training_loss = []
        self.validation_loss = []
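
As a usage sketch for this initializer: the class name NeuralNetwork is an assumption (the excerpt shows only the __init__), and the argument values simply restate the defaults.

# Hypothetical instantiation of the model whose __init__ is shown above
model = NeuralNetwork(architecture=[784, 100, 10],
                      activation='sigmoid',
                      learning_rate=0.1,
                      momentum=0.5,
                      weight_decay=1e-4,
                      dropout=0.5,
                      early_stopping=True,
                      seed=99)
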
Example #11
def activate_tag_callback(tag):
    if display_manager.active_window() != prompt:
        return
    print("Activating tag: %s" % tag)
    display_manager.launch(load_window)
    # urllib.request.urlopen is the Python 3 counterpart of urllib2.urlopen
    res = urllib.request.urlopen(
        "http://192.168.1.10:8000/api/activate?tag=%s" % tag).read()
    print(res)
    res_dict = json.loads(res)
    if res_dict["status"] == "error":
        load_window.finish()
    else:
        name = (res_dict["player"]["first_name"] + " " +
                res_dict["player"]["last_name"])
        team = res_dict["player"]["team"]
        rule = res_dict["player"]["rule_text"]
        activation = Activation(name, team, rule)
        display_manager.launch_consume(activation)
Example #12
    def __init__(self,
                 input_layer,
                 num_units,
                 init_stddev,
                 activation_fun=Activation('relu')):
        self.num_units = num_units
        self.activation_fun = activation_fun
        # the input shape will be of size (batch_size, num_units_prev)
        # where num_units_prev is the number of units in the input
        # (previous) layer
        self.input_shape = input_layer.output_size()
        # Weight matrix of shape (num_units_prev, num_units), initialized
        # from a normal distribution with mean 0 and stddev = init_stddev
        self.W = np.random.normal(0, init_stddev,
                                  (self.input_shape[1], num_units))
        # Bias vector of shape (num_units,)
        self.b = np.random.normal(0, init_stddev, (num_units, ))
        # create dummy variables for parameter gradients
        # no need to change these here!
        self.dW = None
        self.db = None
Example #13
  def __init__( self                     ,
                NB_PIPELINE_STAGES  =   4,
                DATAWIDTH           =  32,
                CHANNEL_WIDTH       =   1,
                INIT_DATA           =   0,
                LEN_THETA           =   3,
                CMD_FILE            =  ""):

    self.NB_PIPELINE_STAGES         = NB_PIPELINE_STAGES
    self.DATAWIDTH                  = DATAWIDTH
    self.CHANNEL_WIDTH              = CHANNEL_WIDTH
    self.INIT_DATA                  = INIT_DATA
    self.LEN_THETA                  = LEN_THETA
    self.CMD_FILE                   = CMD_FILE

    # IO Signals
    self.pipeST_A_i                 = PipelineST( self.DATAWIDTH, self.CHANNEL_WIDTH , self.INIT_DATA )
    self.pipeST_B_i                 = PipelineST( self.DATAWIDTH, self.CHANNEL_WIDTH , self.INIT_DATA )
    self.pipeST_o                   = PipelineST( self.DATAWIDTH, self.CHANNEL_WIDTH , self.INIT_DATA )


    # Internal Signals
    self.pipe_out_acc               = PipelineST( self.DATAWIDTH, self.CHANNEL_WIDTH, self.INIT_DATA )

    #----------------- Initializing Pipeline Streams ----------------

    self.operand_a   =   OperandPipeline(   self.NB_PIPELINE_STAGES ,
                                            self.DATAWIDTH          ,
                                            self.CHANNEL_WIDTH      ,
                                            self.INIT_DATA          )

    self.pipeA_stage =   self.operand_a.pipeST_stage_o

    self.operand_b   =   OperandPipeline(   self.NB_PIPELINE_STAGES ,
                                            self.DATAWIDTH          ,
                                            self.CHANNEL_WIDTH      ,
                                            self.INIT_DATA          )

    self.pipeB_stage =   self.operand_b.pipeST_stage_o

    # --- Initializing Command Pipeline
    OPCODE      = "MULT"
    OPSTAGE     = 2
    self.multPipe    =   CommandPipeline(   self.NB_PIPELINE_STAGES ,
                                            self.DATAWIDTH          ,
                                            self.CHANNEL_WIDTH      ,
                                            self.INIT_DATA          ,
                                            OPCODE                  ,
                                            OPSTAGE                 )

    self.multC_stage =   self.multPipe.pipeST_stage_o

    # ---- Initializing Accumulator Block

    self.accuPipe    =   Accumulator(       self.DATAWIDTH          ,
                                            self.CHANNEL_WIDTH      ,
                                            self.INIT_DATA          ,
                                            self.LEN_THETA          )

    # ---- Initializing Activation Block

    ACT_DATAWIDTH = 3    # 0 or 1 for classification
    self.activPipe   =   Activation(        ACT_DATAWIDTH           ,
                                            self.CHANNEL_WIDTH      ,
                                            self.INIT_DATA          )
Example #14
def sim_command_pipeline(pars_obj):

    global test_decimal_shift, theta_decimal_shift

    #------------------ Initializing Pipeline depths ---------------

    NB_PIPELINE_STAGES = 5
    DATAWIDTH = 32
    #-------------- Simulation Initialisations ---------------------

    reset = Signal(bool(1))
    clk = Signal(bool(0))
    elapsed_time = Signal(0)

    clkgen = clk_driver(elapsed_time, clk, period=20)

    #----------------------------------------------------------------

    #----------------- Initializing Pipeline Streams ----------------

    # --- Pipeline Pars
    pars = OperandPipelinePars()
    pars.NB_PIPELINE_STAGES = NB_PIPELINE_STAGES
    pars.DATAWIDTH = DATAWIDTH
    pars.CHANNEL_WIDTH = 2
    global floatDataBus
    if floatDataBus:
        pars.INIT_DATA = 0.0  # requires floating point computation
    else:
        pars.INIT_DATA = 0  # requires intbv computation

    # --- Initializing Pipeline A
    pipe_inpA = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)
    pipe_outA = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)

    operand_a = OperandPipeline()
    ioA = OperandPipelineIo()
    ioA(pars)

    # --- Initializing Pipeline B
    pipe_inpB = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)
    pipe_outB = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)

    operand_b = OperandPipeline()
    ioB = OperandPipelineIo()
    ioB(pars)

    # --- Initializing Command Pipeline
    pipe_multRes = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH,
                              pars.INIT_DATA)
    multcmdFile = '../tests/mult_pipeline.list'
    parsMult = CommandPipelinePars()
    parsMult.DATAWIDTH = pars.DATAWIDTH
    parsMult.CHANNEL_WIDTH = pars.CHANNEL_WIDTH
    parsMult.INIT_DATA = pars.INIT_DATA
    parsMult.STAGE_NB = 1
    parsMult(parsMult, multcmdFile)
    multPipe = CommandPipeline()
    ioMult = CommandPipelineIo()
    ioMult(pars)

    # ---- Initializing Accumulator Block

    pipe_out_acc = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH,
                              pars.INIT_DATA)
    parsAcc = AccumulatorPars()
    parsAcc.DATAWIDTH = pars.DATAWIDTH
    parsAcc.CHANNEL_WIDTH = pars.CHANNEL_WIDTH
    parsAcc.INIT_DATA = pars.INIT_DATA
    global LEN_THETA
    parsAcc.NB_ACCUMULATIONS = LEN_THETA
    accuPipe = Accumulator()
    accuPipe(parsAcc)

    # ---- Initializing Activation Block

    parsActiv = ActivationPars()
    parsActiv.DATAWIDTH = 3  # 0 or 1 for classification
    parsActiv.CHANNEL_WIDTH = pars.CHANNEL_WIDTH
    parsActiv.INIT_DATA = pars.INIT_DATA
    pipe_out_activ = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH,
                                pars.INIT_DATA)
    activPipe = Activation()
    activPipe(parsActiv)
    #----------------------------------------------------------------

    #----------------- Connecting Pipeline Blocks -------------------

    inst = []
    inst.append(
        operand_a.block_connect(pars, reset, clk, pipe_inpA, pipe_outA, ioA))
    inst.append(
        operand_b.block_connect(pars, reset, clk, pipe_inpB, pipe_outB, ioB))
    #----------------------------------------------------------------

    #----------------- Connecting Command Pipeline -------------------
    # Mult Pipeline
    inst.append(
        multPipe.block_connect(parsMult, reset, clk, ioA, ioB, pipe_multRes,
                               ioMult))

    #----------------------------------------------------------------

    #----------------- Connecting Accumulator  --------------
    # Accu
    inst.append(
        accuPipe.block_connect(parsAcc, reset, clk, 0, pipe_multRes,
                               pipe_out_acc))

    #----------------------------------------------------------------

    #----------------- Connecting Activation  --------------
    # Simple Step Activation function
    inst.append(
        activPipe.block_step_connect(parsActiv, reset, clk, pipe_out_acc,
                                     pipe_out_activ))

    #----------------------------------------------------------------

    #----------------- Logistic Regression Test File -------------------

    lr_test_file = "../tests/ex2data1.txt"
    lr_theta_file = "../tests/theta1.txt"

    #--- Loading Test and Theta Values

    test_file_list = []
    theta_file_list = []

    nb_training_examples = 0
    # Loading test data
    with open(lr_test_file, 'r') as f:
        d0 = 1.0  # The first element is always 1

        for line in f:
            # print(line)
            d1, d2, y = line.split(',')
            d0 = round(float(d0), DEF_ROUND)
            d1 = round(float(d1), DEF_ROUND)
            d2 = round(float(d2), DEF_ROUND)
            test_file_list.extend([d0, d1, d2])
            label.extend([int(y)])
            nb_training_examples += 1

    # Loading theta
    with open(lr_theta_file, 'r') as f:
        t0, t1, t2 = (f.read().split('\n')[0]).split(',')
        t0 = round(float(t0), DEF_ROUND)
        t1 = round(float(t1), DEF_ROUND)
        t2 = round(float(t2), DEF_ROUND)
        for i in range(nb_training_examples):
            theta_file_list.extend([t0, t1, t2])

    # exp10 shifts done for theta and test data as per requirements when intbv used
    if not floatDataBus:
        test_file_list = [
            int(i * (10**test_decimal_shift)) for i in test_file_list
        ]
        theta_file_list = [
            int(i * (10**theta_decimal_shift)) for i in theta_file_list
        ]

    # print(test_file_list)
    # print(theta_file_list)
    #----------------------------------------------------------------

    #----------------- Shift Enable for pipeData -------------------

    shiftEn_i = Signal(bool(0))

    @always(clk.posedge, reset.posedge)
    def shift_signal():
        if reset:
            shiftEn_i.next = 1
        else:
            shiftEn_i.next = not shiftEn_i

    @always_comb
    def shiftOperand_signal():
        ioB.shiftEn_i.next = shiftEn_i
        ioA.shiftEn_i.next = shiftEn_i

    #----------------------------------------------------------------

    #----------------- Reset For the Module  --------------------

    @always(clk.posedge)
    def stimulus():
        if elapsed_time == 40:
            reset.next = 0

    #----------------------------------------------------------------

    #----------------- Input Data for the Modules  --------------------

    @always_comb
    def transmit_data_process():
        global line_nb
        if (shiftEn_i == 1 and nbTA == nbTB and nbTA < MAX_NB_TRANSFERS):

            pipe_inpA.data.next = (test_file_list[line_nb])
            pipe_inpA.valid.next = 1
            pipe_inpB.data.next = (theta_file_list[line_nb])
            pipe_inpB.valid.next = 1
            line_nb += 1

        else:
            pipe_inpA.valid.next = 0
            pipe_inpB.valid.next = 0

    #----------------------------------------------------------------

    #----------------- Storing Transmitted Data  --------------------

    @always(clk.posedge, reset.posedge)
    def trans_dataA_process():
        global trans_dataA, trans_dataB, nbTA
        if reset == 1:
            pass
        elif (pipe_inpA.valid == 1 and nbTA < MAX_NB_TRANSFERS):
            nbTA += 1
            trans_dataA.extend([pipe_inpA.data])

    @always(clk.posedge, reset.posedge)
    def trans_dataB_process():
        global trans_dataA, trans_dataB, nbTB
        if reset == 1:
            pass
        elif (pipe_inpB.valid == 1 and nbTB < MAX_NB_TRANSFERS):
            nbTB += 1
            trans_dataB.extend([pipe_inpB.data])

    #----------------------------------------------------------------

    #----------------- Storing Received Data  -----------------------

    @always(clk.posedge)
    def receive_data_process():
        global recv_data, nbR, acc_out

        # Collecting multiplier data
        if pipe_multRes.valid == 1:
            if not floatDataBus:
                mult_out = pipe_multRes.data
            else:
                mult_out = round(pipe_multRes.data, DEF_ROUND)
            recv_data.extend([mult_out])

        # Collecting Activation Data
        if (pipe_out_activ.valid == 1):
            nbR += LEN_THETA
            predict = int(pipe_out_activ.data)
            prediction_res.extend([predict])
            if __debug__:
                print(" prediction: {:d}".format(predict))
            if (nbR == MAX_NB_TRANSFERS):
                raise StopSimulation(
                    "Simulation Finished in %d clks: In total " % now() +
                    str(MAX_NB_TRANSFERS) + " data words received")

        # Collecting Accumulator Data
        if (pipe_out_acc.valid == 1):
            acc_out = pipe_out_acc.data
            #prob=(1.0/(1+ (math.exp(-1.0*acc_out) )))        # Sigmoid activation Function
            if __debug__:
                if not floatDataBus:
                    print("{0:d} Acc: {1:d} ".format(int(nbR / LEN_THETA + 1),
                                                     int(acc_out)),
                          end=' ')
                else:
                    print("{0:d} Acc: {1:0.{i}f}".format(int(nbR / LEN_THETA + 1),
                                                         float(acc_out),
                                                         i=DEF_ROUND),
                          end=' ')
            if not floatDataBus:
                acc_out_list.extend([int(acc_out)])
            else:
                acc_out_list.extend([round(acc_out, DEF_ROUND)])
            # print("nbR: " + str(nbR))

    #----------------------------------------------------------------

    #----------------- Max Simulation Time Exit Condition -----------

    @always(clk.posedge)
    def simulation_time_check():
        sim_time_now = now()
        if (sim_time_now > MAX_SIM_TIME):
            raise StopSimulation(
                "Warning! Simulation Exited upon reaching max simulation time of "
                + str(MAX_SIM_TIME) + " clocks")

    #----------------------------------------------------------------
    return instances()
Example #15
                    rmse_toe_height_plot[i][j], independent_1[i][j],
                    independent_2[i][j]
                ])

    # Sorts by first element (i.e., RMSE)
    top_viable = sorted(viable)
    if len(top_viable) >= 5:
        top_viable = top_viable[:5]

    # Find fatigues
    emg_data = load_data('./data/ta_vs_gait.csv')
    emg_data = np.array(emg_data)
    emg_function = get_norm_emg(emg_data)

    fatigues = []
    all_fatigues = []
    for i in range(len(top_viable)):
        a = Activation(frequency, duty_cycle, scaling, top_viable[i][1])
        a.get_activation_signal(emg_function, shape=shape[top_viable[i][2]])
        fatigues.append([a.get_fatigue(), i])

    for i in range(len(viable)):
        a = Activation(frequency, duty_cycle, scaling, viable[i][1])
        a.get_activation_signal(emg_function, shape=shape[viable[i][2]])
        all_fatigues.append([a.get_fatigue(), i])

    # Sorts by first element (i.e., fatigue)
    top_fatigues = sorted(fatigues)
    optimal = top_viable[top_fatigues[0][1]]
    print(optimal)
Example #16
                    independent_2[i][j]
                ])

    # Sorts by first element (i.e., RMSE)
    top_viable = sorted(viable)
    if len(top_viable) >= 5:
        top_viable = top_viable[:5]

    # Find fatigues
    emg_data = load_data('./data/ta_vs_gait.csv')
    emg_data = np.array(emg_data)
    emg_function = get_norm_emg(emg_data)

    fatigues = []
    all_fatigues = []
    for i in range(len(top_viable)):
        a = Activation(top_viable[i][1], top_viable[i][2], scaling,
                       non_linearity)
        a.get_activation_signal(emg_function)
        fatigues.append([a.get_fatigue(), i])

    for i in range(len(viable)):
        a = Activation(viable[i][1], viable[i][2], scaling, non_linearity)
        a.get_activation_signal(emg_function)
        all_fatigues.append([a.get_fatigue(), i])

    # Sorts by first element (i.e., fatigue)
    top_fatigues = sorted(fatigues)
    optimal = top_viable[top_fatigues[0][1]]
    print(optimal)
Example #17
    def __init__(self,
                 input_layer_size,
                 state_layer_size,
                 state_layer_activation,
                 output_layer_size,
                 output_layer_activation,
                 epochs=100,
                 bptt_truncate=None,
                 learning_rule='bptt',
                 kernel=None,
                 eta=0.001,
                 rand=None,
                 verbose=0):
        """
        Notes:
            U - weight matrix from input into hidden layer.
            W - weight matrix from hidden layer to hidden layer.
            V - weight matrix from hidden layer to output layer.

        Inputs:
            input_layer_size:
                Size of the input vector. We expect a 2D numpy array, so this should be X.shape[1]

            state_layer_size:
                State layer size.

            state_layer_activation:
                A string. Refer to activation.py

            output_layer_size:
                Size of the output vector. We expect a 2D numpy array, so this should be Y.shape[1]

            output_layer_activation:
                A string. Refer to activation.py

            epochs(opt):
                Number of epochs for a single training sample.

            learning_rule(opt):
                Choose between 'bptt' and 'modified'

            bptt_truncate(opt):
                If left at None, back propagation through time will be applied for all time steps. 

                Otherwise, a value for bptt_truncate means that 
                bptt will only be applied for at most bptt_truncate steps.

                Only considered when learning_rule == 'bptt'

            kernel(opt):
                # TODO - fill this
                Only considered when learning_rule == 'modified'

            eta (opt):
                Learning rate. Initialized to 0.001.

            rand (opt):
                Random seed. Initialized to None (no random seed).

            verbose (opt):
                Verbosity: levels 0 - 2

        Outputs:
            None
        """
        np.random.seed(rand)

        self.learning_rule = learning_rule.lower()

        if self.learning_rule == 'bptt':
            self.gradient_function = self.bptt
        elif self.learning_rule == 'modified':
            self.gradient_function = self.modified_learning_rule
        else:
            raise ValueError

        self.input_layer_size = input_layer_size

        self.state_layer_size = state_layer_size
        self.state_layer_activation = state_layer_activation
        self.state_activation = Activation(state_layer_activation)

        self.output_layer_size = output_layer_size
        self.output_layer_activation = output_layer_activation
        self.output_activation = Activation(output_layer_activation)

        self.epochs = epochs

        self.kernel = kernel
        self.bptt_truncate = bptt_truncate

        # U - weight matrix from input into state layer.
        # W - weight matrix from state layer to state layer.
        # V - weight matrix from state layer to output layer.
        self.U = np.random.uniform(-np.sqrt(1. / input_layer_size),
                                   np.sqrt(1. / input_layer_size),
                                   (state_layer_size, input_layer_size))
        self.V = np.random.uniform(-np.sqrt(1. / state_layer_size),
                                   np.sqrt(1. / state_layer_size),
                                   (output_layer_size, state_layer_size))
        self.W = np.random.uniform(-np.sqrt(1. / state_layer_size),
                                   np.sqrt(1. / state_layer_size),
                                   (state_layer_size, state_layer_size))
        self.state_bias = np.zeros((state_layer_size, 1))
        self.output_bias = np.zeros((output_layer_size, 1))

        self.eta = eta
        self.verbose = verbose
        self.show_progress_bar = verbose > 0
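
A hedged instantiation sketch for the constructor above; the class name RNN, the arrays X and Y, and the chosen sizes and activation strings are assumptions for illustration, not taken from the excerpt.

# Hypothetical usage: X with shape (T, n_features), Y with shape (T, n_outputs)
rnn = RNN(input_layer_size=X.shape[1],
          state_layer_size=32,
          state_layer_activation='tanh',
          output_layer_size=Y.shape[1],
          output_layer_activation='sigmoid',
          epochs=100,
          learning_rule='bptt',
          bptt_truncate=4,
          eta=0.001,
          rand=42,
          verbose=1)
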
Example #18
    def addLayer(self,
                 inputs      = None,
                 neurons     = None,
                 activations = None,
                 alpha       = None,
                 outputs     = None,
                 output      = False) :

        if neurons is None :
            if self.neurons is None :
                raise ValueError(   "Number of neurons is not specified. "      +
                                    "Use the NeuralNetwork class constructor, " +
                                    "the .set method, or give the number as "   +
                                    "input to this method (.addLayer).")
            else :
                neurons = self.neurons

        if activations is None :
            if self.activations is None :
                warnings.warn(  "No activation function specified, using "  +
                                "sigmoid activation for this (and all "     +
                                "subsequent layers added).")
                self.activations = 'sigmoid'
                activations = self.activations
            else :
                activations = self.activations

        if self.weights is None :
            if inputs is None :
                if self.inputs is None :
                    raise ValueError(   "The number of inputs is not specified."   +
                                        "Use the NeuralNetwork class constructor, " +
                                        "the .set method, or give the number as "   +
                                        "input to this method (.addLayer).")
                else :
                    inputs = self.inputs
            else :
                self.inputs = inputs 
            
            if not self.silent :
                print(  "Adding input layer with " + str(neurons) + " neurons "  +
                        "using " + str(activations) + " activations.")
            #W = np.random.uniform(-1.0, 1.0, size=(inputs, neurons))
            #b = np.random.uniform(-0.1, 0.1, size=(neurons,1))
            W = self.initializeWeight(inputs, neurons, activations)
            b = np.zeros(shape=(neurons,1))
            f = Activation(function = activations, alpha = alpha)

            self.weights = [W]
            self.biases  = [b]
            self.act     = [f]
            
        elif output == True :
            if outputs is None :
                if self.outputs is None :
                    raise ValueError(   "The number of outputs is not specified."   +
                                        "Use the NeuralNetwork class constructor, " +
                                        "the .set method, or give the number as "   +
                                        "input to this method (.addLayer / "        +
                                        ".addOutputLayer).")
                else :
                    outputs = self.outputs
            else :
                if self.outputs is not None and self.outputs != outputs :
                    warnings.warn(  "The number of outputs was earlier set to "         +
                                    str(self.outputs) + ", but the value specified to " +
                                    ".addLayer / .addOutputLayer of " + str(outputs)    +
                                    " overrides this value.")
                self.outputs = outputs

            if not self.silent :
                print(  "Adding output layer with " + str(outputs) + " outputs, "  +
                        "with " + str(activations) + " activation.")
            previousLayerNeurons = self.weights[-1].shape[1]
            #W = np.random.uniform(-1.0, 1.0, size=(previousLayerNeurons, outputs))
            #b = np.random.uniform(-0.1, 0.1, size=(outputs,1))
            W = self.initializeWeight(previousLayerNeurons, outputs, activations)
            b = np.zeros(shape=(outputs,1))
            f = Activation(function = activations, alpha = alpha)
            
            self.weights.append(W)
            self.biases .append(b)
            self.act    .append(f)
        else :
            if not self.silent :
                print(  "Adding layer with " + str(neurons) + " neurons using "  +
                        str(activations) + " activations.")
            previousLayerNeurons = self.weights[-1].shape[1]
            #W = np.random.uniform(-1.0, 1.0, size=(previousLayerNeurons, neurons))
            #b = np.random.uniform(-0.1, 0.1, size=(neurons,1))
            W = self.initializeWeight(previousLayerNeurons, neurons, activations)
            b = np.zeros(shape=(neurons,1))
            f = Activation(function = activations, alpha = alpha)

            self.weights.append(W)
            self.biases .append(b)
            self.act    .append(f)
Example #19
    for i in range(len(above_0_plot)):
        if above_0_plot[i] == 1:
            viable.append([rmse_toe_height_plot[i], frequency[i]])

    # Sorts by first element (i.e., RMSE)
    top_viable = sorted(viable)
    if len(top_viable) >= 5:
        top_viable = top_viable[:5]

    # Find fatigues
    emg_data = load_data('./data/ta_vs_gait.csv')
    emg_data = np.array(emg_data)
    emg_function = get_norm_emg(emg_data)

    fatigues = []
    all_fatigues = []
    for i in range(len(top_viable)):
        a = Activation(top_viable[i][1], duty_cycle, scaling, non_linearity)
        a.get_activation_signal(emg_function, shape="halfsin")
        fatigues.append([a.get_fatigue(), i])

    for i in range(len(viable)):
        a = Activation(viable[i][1], duty_cycle, scaling, non_linearity)
        a.get_activation_signal(emg_function, shape="halfsin")
        all_fatigues.append([a.get_fatigue(), i])

    # Sorts by first element (i.e., fatigue)
    top_fatigues = sorted(fatigues)
    optimal = top_viable[top_fatigues[0][1]]
    print(optimal)
Example #20
def foo(mod, op, d):
    if (op[0] == "linear"):
        xx = Linear(d)

    # rnncell, lstmcell, grucell
    elif (mod[0] in ["LSTMCell", "GRUCell"]) and (op[0] == "forward"):
        xx = RNNCell(d)

    elif op[0] in [
            "conv1d",
            "conv2d",
    ]:
        xx = Conv(d)

    elif (op[0] in Pointwise.ops):
        xx = Pointwise(d)

    elif (op[0] in Convert.ops):
        xx = Convert(d)

    elif op[0] in ["__matmul__", "matmul"]:
        xx = Matmul(d)

    elif op[0] == "embedding":
        xx = Embedding(d)

    # reduction
    elif op[0] == "sum":
        xx = Sum(d)

    elif op[0] == "mean":
        xx = Mean(d)

    elif op[0] == "norm":
        xx = Norm(d)

    elif op[0] == "dropout":
        xx = Dropout(d)

    # Index, Slice, Join, Mutate
    elif (op[0] == "cat"):
        xx = Cat(d)

    elif (op[0] == "reshape"):
        xx = Reshape(d)

    elif (op[0] == "masked_scatter_"):
        xx = MaskedScatter(d)

    elif (op[0] == "gather"):
        xx = Gather(d)

    elif (op[0] == "nonzero"):
        xx = Nonzero(d)

    elif (op[0] == "index_select"):
        xx = IndexSelect(d)

    elif (op[0] == "masked_select"):
        xx = MaskedSelect(d)

    # blas
    elif op[0] in ["addmm", "addmm_"]:
        xx = Addmm(d)

    elif op[0] == "mm":
        xx = Mm(d)

    elif op[0] == "bmm":
        xx = Bmm(d)

    # softmax
    elif op[0] == "softmax":
        xx = Softmax(d)

    elif op[0] == "log_softmax":
        xx = LogSoftmax(d)

    # loss
    elif op[0] == "mse_loss":
        xx = MSELoss(d)

    # optimizers
    elif op[0] == "adam":
        xx = Adam(d)

    # normalization
    elif op[0] == "batch_norm":
        xx = BatchNorm(d)

    # random
    elif op[0] == "randperm":
        xx = RandPerm(d)

    # misc
    elif op[0] == "copy_":
        xx = Copy(d)

    elif op[0] == "clone":
        xx = Clone(d)

    elif op[0] == "contiguous":
        xx = Contiguous(d)

    elif op[0] == "any":
        xx = Any(d)

    elif (op[0] in Activation.ops):
        xx = Activation(d)

    elif op[0] == "to":
        xx = Convert(d)

    else:
        xx = Foo(d)

    return xx
Example #21
def lr_top(pars, reset, clk, pipe_inpA, pipe_inpB, pipe_out_activ):

    #----------------- Initializing Pipeline Streams ----------------

    # --- Initializing Pipeline A
    pipe_outA = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)

    operand_a = OperandPipeline()
    ioA = OperandPipelineIo()
    ioA(pars)

    # --- Initializing Pipeline B
    pipe_outB = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)

    operand_b = OperandPipeline()
    ioB = OperandPipelineIo()
    ioB(pars)

    # --- Initializing Command Pipeline
    pipe_multRes = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH,
                              pars.INIT_DATA)
    multcmdFile = 'tb/tests/mult_pipeline.list'
    parsMult = CommandPipelinePars()
    parsMult.DATAWIDTH = pars.DATAWIDTH
    parsMult.CHANNEL_WIDTH = pars.CHANNEL_WIDTH
    parsMult.INIT_DATA = pars.INIT_DATA
    parsMult.STAGE_NB = 1
    parsMult(parsMult, multcmdFile)
    multPipe = CommandPipeline()
    ioMult = CommandPipelineIo()
    ioMult(pars)

    # ---- Initializing Accumulator Block

    pipe_out_acc = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH,
                              pars.INIT_DATA)
    parsAcc = AccumulatorPars()
    parsAcc.DATAWIDTH = pars.DATAWIDTH
    parsAcc.CHANNEL_WIDTH = pars.CHANNEL_WIDTH
    parsAcc.INIT_DATA = pars.INIT_DATA
    parsAcc.NB_ACCUMULATIONS = pars.LEN_THETA
    accuPipe = Accumulator()
    accuPipe(parsAcc)

    # ---- Initializing Activation Block

    parsActiv = ActivationPars()
    parsActiv.DATAWIDTH = 3  # 0 or 1 for classification
    parsActiv.CHANNEL_WIDTH = pars.CHANNEL_WIDTH
    parsActiv.INIT_DATA = pars.INIT_DATA
    activPipe = Activation()
    activPipe(parsActiv)
    #----------------------------------------------------------------

    #----------------- Connecting Pipeline Blocks -------------------

    trainingData = (operand_a.block_connect(pars, reset, clk, pipe_inpA,
                                            pipe_outA, ioA))
    theta = (operand_b.block_connect(pars, reset, clk, pipe_inpB, pipe_outB,
                                     ioB))
    #----------------------------------------------------------------

    #----------------- Connecting Command Pipeline -------------------
    # Mult Pipeline
    command = (multPipe.block_connect(parsMult, reset, clk, ioA, ioB,
                                      pipe_multRes, ioMult))
    #----------------------------------------------------------------

    #----------------- Connecting Accumulator  --------------
    # Accu
    acc_reset = Signal(bool(0))
    accumulator = (accuPipe.block_connect(parsAcc, reset, clk, acc_reset,
                                          pipe_multRes, pipe_out_acc))

    #----------------------------------------------------------------

    #----------------- Connecting Activation  --------------
    # Simple Step Activation function
    activation = (activPipe.block_step_connect(parsActiv, reset, clk,
                                               pipe_out_acc, pipe_out_activ))

    #----------------------------------------------------------------

    return instances()
Example #22
    )  # plot CE, SE, PE force-length curves and CE force-velocity curve
    print(get_velocity(1.0, np.array([1.0]), np.array(
        [1.01])))  # calculate velocity given a=1.0,lm=1.0,ls=1.01

    # Constants
    max_isometric_force = 605.0
    total_length = 0.44479194718764087
    resting_muscle_length = .25 * total_length
    resting_tendon_length = .75 * total_length

    emg_data = load_data('./data/ta_vs_gait.csv')
    emg_data = np.array(emg_data)
    emg_data_regress = get_norm_emg(emg_data)

    frequency, duty_cycle, scaling, non_linearity = 35, 0.5, 1, -1
    a = Activation(frequency, duty_cycle, scaling, non_linearity)
    a.get_activation_signal(emg_data_regress, shape="monophasic")

    # Create an HillTypeMuscle using the given constants
    muscle = HillTypeMuscle(max_isometric_force, resting_muscle_length,
                            resting_tendon_length)

    # Dynamic equation
    def f(t, x):
        normalized_tendon_length = muscle.norm_tendon_length(total_length, x)
        temp = get_velocity(a.get_amp(t / 100), np.array([x]),
                            np.array([normalized_tendon_length]))
        return temp
        # return 100*get_velocity(0, np.array([x]), np.array([normalized_tendon_length]))

    # Simulate using rk45
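
The excerpt stops at the comment above; a minimal sketch of that integration step with SciPy's RK45 solver, assuming a [0, 100] time span (matching the t / 100 scaling in f) and an initial normalized CE length of 1.0, neither of which appears in the excerpt:

from scipy.integrate import solve_ivp

# solve_ivp passes the state as a 1-element array, so unwrap it for f(t, x)
sol = solve_ivp(lambda t, y: f(t, y[0]), [0, 100], [1.0], method='RK45')
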
Example #23
    def __init__(self, learning_rate: float, X: np.ndarray, Y: np.ndarray,
                 func: str):
        super().__init__(learning_rate, X, Y)
        self._activer = Activation(func)
Example #24
import numpy as np
import pandas as pd
from utilities import Utilities
from activation import Activation
act = Activation()


class NeuralNetwork:
    def __init__(self,
                 W=np.array(0, dtype=float),
                 b=0,
                 epoch=10,
                 learn_rate=0.01):
        """
    This is the constructor class
    Input:
    * W: an array of weights
    * b: the bias
    * epoch: the number of loops to fit model
    * learn_rate: the scale of gradient descent steps
    * cost: the array of regularization
    Ouput:
    * returns a log-loss neural network object
    """
        self.w_ = W
        self.b_ = b
        self.e_ = epoch
        self.l_ = learn_rate
        self.c_ = None
        np.random.seed(143)
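
A short usage sketch for this constructor; the weight and hyperparameter values below are placeholders for illustration (np is the numpy import at the top of the example).

# Hypothetical instantiation with two input features
nn = NeuralNetwork(W=np.array([0.4, -0.2], dtype=float),
                   b=0.1,
                   epoch=50,
                   learn_rate=0.005)
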
Example #25
    def set_activation(self, frequency, duty_cycle, scaling, non_linearity, shape_):
        self.a = Activation(frequency, duty_cycle, scaling, non_linearity)
        self.a.get_activation_signal(self.lit_data.activation_function(), shape=shape_)
        self.a.plot()
Example #26
    def __init__(self,
                 input_layer_size,
                 state_layer_size,
                 state_layer_activation,
                 output_layer_size,
                 output_layer_activation,
                 epochs=100,
                 bptt_truncate=None,
                 learning_rule='bptt',
                 tau=None,
                 eta=0.001,
                 rand=None,
                 verbose=0):
        """
        Notes:
            U - weight matrix from input into hidden layer.
            W - weight matrix from hidden layer to hidden layer.
            V - weight matrix from hidden layer to output layer.
        Inputs:
            input_layer_size:
                Size of the input vector. We expect a 2D numpy array, so this should be X.shape[1]
            state_layer_size:
                State layer size.
            state_layer_activation:
                A string. Refer to activation.py
            output_layer_size:
                Size of the output vector. We expect a 2D numpy array, so this should be Y.shape[1]
            output_layer_activation:
                A string. Refer to activation.py
            epochs(opt):
                Number of epochs for a single training sample.
            learning_rule(opt):
                Choose between 'bptt', 'fa', 'dfa', or 'modified'

            bptt_truncate(opt):
                If left at None, back propagation through time will be applied for all time steps. 
                Otherwise, a value for bptt_truncate means that 
                bptt will only be applied for at most bptt_truncate steps.
                Only considered when learning_rule == 'bptt'
            tau(opt):
                # TODO - fill this
                Only considered when learning_rule == 'modified'
            eta (opt):
                Learning rate. Initialized to 0.001.
            rand (opt):
                Random seed. Initialized to None (no random seed).
            verbose (opt):
                Verbosity: levels 0 - 2
        Outputs:
            None
        """
        np.random.seed(rand)

        self.learning_rule = learning_rule.lower()

        if self.learning_rule == 'bptt':
            self.gradient_function = self.bptt
        elif self.learning_rule == 'fa':
            self.gradient_function = self.feedback_alignment
        elif self.learning_rule == 'dfa':
            self.gradient_function = self.direct_feedback_alignment
        elif self.learning_rule == 'modified':
            self.gradient_function = self.modified_learning_rule
        else:
            raise ValueError

        self.input_layer_size = input_layer_size

        self.state_layer_size = state_layer_size
        self.state_layer_activation = state_layer_activation
        self.state_activation = Activation(state_layer_activation)

        self.output_layer_size = output_layer_size
        self.output_layer_activation = output_layer_activation
        self.output_activation = Activation(output_layer_activation)

        self.epochs = epochs

        self.tau = tau
        self.bptt_truncate = bptt_truncate

        self.kernel_convs = None

        # U - weight matrix from input into state layer.
        # W - weight matrix from state layer to state layer.
        # V - weight matrix from state layer to output layer.
        """
        if self.learning_rule == 'bptt':
            self.U = np.random.uniform(-np.sqrt(1./input_layer_size),
                                        np.sqrt(1./input_layer_size), 
                                        (state_layer_size, input_layer_size))
            self.V = np.random.uniform(-np.sqrt(1./state_layer_size),
                                        np.sqrt(1./state_layer_size),
                                        (output_layer_size, state_layer_size))
            self.W = np.random.uniform(-np.sqrt(1./state_layer_size),
                                        np.sqrt(1./state_layer_size),
                                        (state_layer_size, state_layer_size))
            else:
        """
        if state_layer_size == input_layer_size and state_layer_size == output_layer_size:
            print "Using identity matrices for U and V"
            self.U = np.eye(state_layer_size)
            self.V = np.eye(state_layer_size)
        else:
            self.U = np.random.uniform(1, 2.,
                                       (state_layer_size, input_layer_size))
            self.V = np.random.uniform(1, 2.,
                                       (output_layer_size, state_layer_size))

        self.W = np.random.uniform(-0.5, 0.5,
                                   (state_layer_size, state_layer_size))
        # see if W matrix randomization is the cause
        #self.W = np.random.rand(2, 2) - 1/2#np.array([[0.51940038, -0.57702151],[0.64065148, 0.31259335]])
        #self.W = np.array([[0.51940038, -0.57702151],[0.64065148, 0.31259335]])

        self.state_bias = np.zeros((state_layer_size, 1))
        self.output_bias = np.zeros((output_layer_size, 1))

        # B - Feedback weight matrix for all layers
        """
        self.B = np.random.uniform(-np.sqrt(1./state_layer_size),
                                    np.sqrt(1./state_layer_size), 
                                    (state_layer_size, input_layer_size))
                                    """
        self.B = np.random.uniform(0., 0.5, self.W.shape)

        self.eta = eta
        self.verbose = verbose
        self.show_progress_bar = verbose > 0