def __init__(self, args, pretrained):
        """Build the BiDAF-style transformer QA model.

        Layers: word embedding, separate context/question encoder blocks,
        attention-flow weight projections, a stack of cloned model-encoder
        blocks, and linear heads for the answer start/end indices.

        Args:
            args: namespace providing word_dim, hidden_size, dropout and
                Model_encoder_size.
            pretrained: pretrained word vectors forwarded to the embedding
                layer.
        """
        super(Transfrmr_bidaf, self).__init__()
        self.embed = embed.Embedding(args, pretrained)

        # Encoder blocks for the context and the question streams.
        self.encoder_ctxt = encode.Encoder_block(args, 2 * args.word_dim)
        self.encoder_ques = encode.Encoder_block(args, 2 * args.word_dim)

        # Attention-flow layer weight projections (context, question,
        # context*question), each mapping 2*hidden_size -> 1.
        self.att_weight_c = Linear(args.hidden_size * 2, 1, args.dropout)
        self.att_weight_q = Linear(args.hidden_size * 2, 1, args.dropout)
        self.att_weight_cq = Linear(args.hidden_size * 2, 1, args.dropout)
        self.N = args.Model_encoder_size
        self.dropout = nn.Dropout(p=args.dropout)

        # Model-encoding layer: N cloned encoder blocks over the
        # 8*word_dim attention output. (Dead commented-out Linear
        # variants removed.)
        self.Model_encoder = self.get_clones(
            encode.Encoder_block(args, 8 * args.word_dim),
            args.Model_encoder_size)
        # Span-boundary heads over the concatenated encoder outputs.
        self.start_idx = nn.Linear(16 * args.word_dim, 1)
        self.end_idx = nn.Linear(16 * args.word_dim, 1)
Example #2
0
class RNN(object):
    """Encoder/decoder pair trained jointly, one Adam optimizer each."""

    def __init__(self, input_size):
        super(RNN, self).__init__()

        # Symmetric sub-networks sized to the input.
        self.encoder = Encoder(input_size)
        self.decoder = Decoder(input_size)

        # Cross-entropy objective; separate optimizer per sub-network,
        # both with the same learning rate.
        self.loss = nn.CrossEntropyLoss()
        self.encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=0.1)
        self.decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=0.1)

    def train(self, input, target):
        """Run one optimization step on a single batch.

        Returns (loss_value, outputs, correct_count). `outputs` is always
        an empty list in this implementation.
        """
        self.encoder_optimizer.zero_grad()
        self.decoder_optimizer.zero_grad()

        # Encode the input; only the final hidden state is carried over.
        _, hidden_state = self.encoder.forward(input.long(), None)

        # Decode conditioned on the encoder state and score the result.
        outputs = []
        _, softmax, hidden_state = self.decoder.forward(input, hidden_state)
        total_loss = self.loss(softmax, target.squeeze(1).long())
        total_loss.backward()

        # Number of correct argmax predictions, for accuracy reporting.
        acc_sum = (softmax.argmax(dim=1) == target.squeeze(1)).sum().item()

        self.decoder_optimizer.step()
        self.encoder_optimizer.step()

        return total_loss.item(), outputs, acc_sum
Example #3
0
File: s3g.py Project: jetty840/s3g
  def get_tool_status(self, tool_index):
    """
    Retrieve some information about the tool
    @param int tool_index: The tool we would like to query for information
    @return A dictionary containing status information about the tool_index
      ExtruderReady : The extruder has reached target temp
      ExtruderNotPluggedIn : The extruder thermocouple is not detected by the bot
      ExtruderOverMaxTemp : The temperature measured at the extruder is greater than max allowed
      ExtruderNotHeating : In the first 40 seconds after target temp was set, the extruder is not heating up as expected
      ExtruderDroppingTemp : After reaching and maintaining temperature, the extruder temp has dropped 30 degrees below target
      PlatformError: an error was detected with the platform heater (if the tool supports one).  
        The platform heater will fail if an error is detected with the sensor (thermocouple) 
        or if the temperature reading appears to be unreasonable.
      ExtruderError: An error was detected with the extruder heater (if the tool supports one).  
        The extruder heater will fail if an error is detected with the sensor (thermocouple) or 
        if the temperature reading appears to be unreasonable
    """
    response = self.tool_query(tool_index, slave_query_command_dict['GET_TOOL_STATUS'])

    # Fixed: variable was misspelled 'resonse_code'.
    [response_code, bitfield] = Encoder.unpack_response('<BB', response)

    bitfield = Encoder.decode_bitfield(bitfield)

    # NOTE(review): bitfield[5] is skipped here — confirm this matches the
    # firmware's status-bit layout.
    returnDict = {
      "ExtruderReady"        : bitfield[0], 
      "ExtruderNotPluggedIn" : bitfield[1],
      "ExtruderOverMaxTemp"  : bitfield[2],
      "ExtruderNotHeating"   : bitfield[3],
      "ExtruderDroppingTemp" : bitfield[4],
      "PlatformError"        : bitfield[6],
      "ExtruderError"        : bitfield[7],
    }
    return returnDict
Example #4
0
 def __init__(self,
              ddconfig,
              lossconfig,
              n_embed,
              embed_dim,
              ckpt_path=None,
              ignore_keys=None,
              image_key="image",
              colorize_nlabels=None,
              monitor=None
              ):
     """VQ autoencoder: encoder/decoder pair with a vector-quantized
     bottleneck and 1x1 convs bridging to/from the codebook space.

     Args:
         ddconfig: dict of encoder/decoder kwargs (must contain
             "z_channels").
         lossconfig: config object instantiated into the training loss.
         n_embed: codebook size.
         embed_dim: codebook embedding dimensionality.
         ckpt_path: optional checkpoint to restore from.
         ignore_keys: state-dict key prefixes to skip when restoring.
             (Fixed: was a mutable default `[]`.)
         image_key: batch key for the input image.
         colorize_nlabels: if set (int), registers a random projection
             buffer for visualizing segmentation labels.
         monitor: optional metric name to monitor.
     """
     super().__init__()
     self.image_key = image_key
     self.encoder = Encoder(**ddconfig)
     self.decoder = Decoder(**ddconfig)
     self.loss = instantiate_from_config(lossconfig)
     # Codebook with commitment weight fixed at 0.25.
     self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25)
     self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
     self.post_quant_conv = torch.nn.Conv2d(
         embed_dim, ddconfig["z_channels"], 1)
     if ignore_keys is None:
         ignore_keys = []
     if ckpt_path is not None:
         self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
     # Fixed: image_key was assigned twice; duplicate removed.
     if colorize_nlabels is not None:
         assert isinstance(colorize_nlabels, int)
         self.register_buffer(
             "colorize", torch.randn(3, colorize_nlabels, 1, 1))
     if monitor is not None:
         self.monitor = monitor
Example #5
0
    def move_ticks(self, ticks_l, ticks_r):
        """Drive both wheels until their encoder counts reach the given
        tick setpoints, or until a distance-scaled time budget expires.

        ticks_l / ticks_r: target encoder ticks for the left and right
        wheels (sign presumably encodes direction — confirm with Encoder).
        """
        # Presumably resets the hardware encoder counters for channel 0
        # and channel 1 — TODO confirm Encoder.write semantics.
        Encoder.write(0, 0)
        Encoder.write(0, 1)

        # Time budget scales with the requested distance.
        # NOTE(review): factor is derived from ticks_l alone — confirm this
        # is intended when ticks_l != ticks_r (e.g. turns).
        factor = abs(ticks_l)/TICKS_CELL

        self.setpointL = ticks_l
        self.setpointR = ticks_r

        self.inputR = 0
        self.inputL = 0

        # Milliseconds since epoch; loop until both wheels are within 5
        # ticks of their setpoints or the scaled timeout elapses.
        start = time.time() * 1000
        while ((abs(self.inputL - self.setpointL) > 5 or abs(self.inputR - self.setpointR) > 5) and (time.time()* 1000 - start < factor *TIME)):
            self.inputL , self.inputR = self.read_encoders()
            #print(self.inputL)
            #print(self.inputR)

            # Update both PID controllers with the latest readings.
            self.PIDRight.compute(self.inputR, self.setpointR)
            self.PIDLeft.compute(self.inputL, self.setpointL)

            # Remaining ticks per wheel.
            l_rem = abs(self.inputL - self.setpointL)
            r_rem = abs(self.inputR - self.setpointR)
            
            # If one wheel lags well behind the other (or the other is
            # nearly done), drive only the lagging wheel to re-align;
            # otherwise drive both from their PID outputs.
            if l_rem > r_rem and (abs(l_rem - r_rem) > 10 or r_rem < 10):
                self.move_motors(self.PIDLeft.output(),0)
            elif r_rem > l_rem and (abs(l_rem - r_rem) > 10 or l_rem < 10):
                self.move_motors(0,self.PIDRight.output())
            else:
                self.move_motors(self.PIDLeft.output(),self.PIDRight.output()) 
            
        self.stop()
def decoder(Encoded_BitStream,predictedPerRef, no_frames = 1000,Resolution=1):
    """Decode an encoded video bitstream back into RGB frames.

    NOTE(review): this function is incomplete scaffolding. It decodes the
    bitstream into `Bitstream` but then reads ref_frames / vid_mv /
    vid_residuals / vid_frame from enclosing scope instead of the decoded
    data, and never returns `Reconstruced_frames` — both need finishing.
    """
    Bitstream=jl.bitstream_bac_decode(Encoded_BitStream)
    
    #return residual, motion vectors, reference frame from bitstream

    #ref_frames has the same shape as the ref_frames variable in the encoder
    #mv has the same shape as the vid_mv variable in the encoder
    #vid_residuals has the same shape as vid_residuals in the encoder
    # all need to be recovered from the bitstream
    Reconstruced_frames=[]
    c=1
    # Fixed: np.int was removed from NumPy; the builtin int is equivalent.
    for j in range(0,int(len(vid_frame)/predictedPerRef)):
        # Each group of predictedPerRef frames starts from one reference.
        Reconstruced_frames.append(E.conv_decom_YUV2RGB(ref_frames[j]))

        #Reshaping the reference frames to use in the coming blocks
        im_ref,_,_=E.get_sub_images(E.reshape_image(ref_frames[j][0]))

        for i in range(0,predictedPerRef-1):
            #inverse spatial transform of the residual
            residual_frame=E.spatial_inverse_model(vid_residuals[c])

            #motion-compensated prediction from the reference
            p_image=E.predict(im_ref,vid_mv[c], ref_frames[0][0].shape[0], ref_frames[0][0].shape[1])

            #adding the residuals to get the reconstructed image
            Reconstructed=p_image+residual_frame
            c+=1
Example #7
0
    def __init__(self, model_params):
        """Build the adversarial KBQA model from a `model_params` dict:
        encoder, decoder, positioner and discriminator, plus a generator
        optimizer and a discriminator optimizer.
        """
        # --- Embedding / architecture sizes ---
        self.word_indexer = model_params["word_indexer"]
        self.word_embedder = model_params["word_embedder"]
        self.embedding_size = model_params["embedding_size"]
        self.state_size = model_params["state_size"]
        self.mode_size = model_params["mode_size"]
        self.position_size = model_params["position_size"]
        self.ques_attention_size = model_params["ques_attention_size"]
        self.kb_attention_size = model_params["kb_attention_size"]
        self.dis_embedding_dim = model_params["dis_embedding_dim"]
        self.dis_hidden_dim = model_params["dis_hidden_dim"]
        self.max_fact_num = model_params["max_fact_num"]
        self.max_ques_len = model_params["max_ques_len"]

        # --- Training hyper-parameters ---
        self.learning_rate = model_params["learning_rate"]
        self.mode_loss_rate = model_params["mode_loss_rate"]
        self.position_loss_rate = model_params["position_loss_rate"]
        self.L2_factor = model_params["L2_factor"]
        self.batch_size = model_params["batch_size"]
        self.adv_batch_size = model_params["adv_batch_size"]
        self.epoch_size = model_params["epoch_size"]
        self.adv_epoch_size = model_params["adv_epoch_size"]
        self.instance_weight = 1.0 / self.batch_size
        self.MAX_LENGTH = model_params["MAX_LENGTH"]
        self.has_trained = False
        self.oracle_samples = []

        ################ Initialize graph components ########################
        self.encoder = Encoder(self.word_indexer.wordCount, self.state_size,
                               self.embedding_size)
        self.decoder = Decoder(output_size=self.word_indexer.wordCount,
                               state_size=self.state_size,
                               embedding_size=self.embedding_size,
                               mode_size=self.mode_size,
                               kb_attention_size=self.kb_attention_size,
                               max_fact_num=self.max_fact_num,
                               ques_attention_size=self.ques_attention_size,
                               max_ques_len=self.max_ques_len,
                               position_size=self.MAX_LENGTH)
        self.positioner = Positioner(self.state_size, self.position_size,
                                     self.MAX_LENGTH)
        self.dis = Discriminator(self.dis_embedding_dim,
                                 self.dis_hidden_dim,
                                 self.word_indexer.wordCount,
                                 self.MAX_LENGTH,
                                 gpu=use_cuda)

        if use_cuda:
            self.encoder.cuda()
            self.decoder.cuda()
            self.positioner.cuda()
            self.dis.cuda()

        # Generator-side optimizer covers encoder, decoder and positioner.
        self.optimizer = optim.Adam(list(self.encoder.parameters()) +
                                    list(self.decoder.parameters()) +
                                    list(self.positioner.parameters()),
                                    lr=self.learning_rate,
                                    weight_decay=self.L2_factor)
        # Fixed: bare name 'dis' was a NameError — must be self.dis.
        self.dis_optimizer = optim.Adagrad(self.dis.parameters())
Example #8
0
    def __init__(self, input_size):
        """Set up the encoder/decoder pair and their training helpers."""
        super(RNN, self).__init__()

        # Shared cross-entropy objective.
        self.loss = nn.CrossEntropyLoss()

        # Symmetric encoder and decoder sized to the input, each with its
        # own Adam optimizer at the same learning rate.
        self.encoder = Encoder(input_size)
        self.encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=0.1)

        self.decoder = Decoder(input_size)
        self.decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=0.1)
Example #9
0
def set_up_encoder() -> list:
    """
    Set up encoder GPIO and construct the encoder objects.

    Note: the return annotation previously said `Encoder`, but a list of
    two Encoder objects is returned.
    :return: ready to use encoder object list (two Encoder instances)
    """
    # Configure every encoder pin.
    # NOTE(review): GPIO.OUT for encoder pins looks suspicious (encoders
    # are usually inputs) — confirm against the wiring.
    for pin in EncoderPins:
        GPIO.setup(pin, GPIO.OUT)
    encoder = [Encoder.Encoder(EncoderPins.encoder1_m1, EncoderPins.encoder0_m1),
               Encoder.Encoder(EncoderPins.encoder1_m0, EncoderPins.encoder0_m0)]
    return encoder
Example #10
0
def samples_generator_c(batch_data_list, X_list, X_list_c, types_list,
                        batch_size, z_dim, y_dim, y_dim_partition, s_dim, tau,
                        normalization_params):
    """Build the test-time sampling branch of a heterogeneous (H)VAE graph.

    Reuses (reuse=True) the variables created by the training branch to
    draw s, z, y and x samples, and evaluates the log-likelihood.

    Returns:
        samples_test: dict with keys 's', 'z', 'y', 'x'.
        test_params: dict with the decoder parameters under key 'x'.
        log_p_x: per-variable log-likelihood tensors.
        theta: decoder parameter estimates from y.
    """
    # Placeholder dict; every key is overwritten below.
    samples_test = dict.fromkeys(['s', 'z', 'y', 'x'], [])
    test_params = dict()
    X = tf.concat(X_list, 1)
    X_c = tf.concat(X_list_c, 1)

    # Create the proposal of q(s|x^o)
    _, params = Encoder.s_proposal_multinomial_c(X,
                                                 X_c,
                                                 batch_size,
                                                 s_dim,
                                                 tau,
                                                 reuse=True)
    # Hard (one-hot argmax) assignment of the mixture component at test time.
    samples_test['s'] = tf.one_hot(tf.argmax(params, 1), depth=s_dim)

    # Create the proposal of q(z|s,x^o)
    _, params = Encoder.z_proposal_GMM_factorized_c(X_list,
                                                    X_c,
                                                    samples_test['s'],
                                                    batch_size,
                                                    z_dim,
                                                    reuse=True)
    # Use the mean (params[0]) rather than a stochastic sample.
    samples_test['z'] = params[0]

    # Create deterministic layer y
    samples_test['y'] = tf.layers.dense(
        inputs=samples_test['z'],
        units=y_dim,
        activation=None,
        kernel_initializer=tf.random_normal_initializer(stddev=0.05),
        trainable=True,
        name='layer_h1_',
        reuse=True)

    # Partition y per data type before estimating decoder parameters.
    grouped_samples_y = Decoder.y_partition(samples_test['y'], types_list,
                                            y_dim_partition)

    # Compute the parameters h_y
    theta = Decoder.theta_estimation_from_y(grouped_samples_y,
                                            types_list,
                                            batch_size,
                                            reuse=True)

    # Compute loglik and output of the VAE
    log_p_x, samples_test['x'], test_params[
        'x'] = Evaluation.loglik_evaluation(batch_data_list,
                                            types_list,
                                            theta,
                                            normalization_params,
                                            reuse=True)

    return samples_test, test_params, log_p_x, theta
Example #11
0
    def money_update(self):
        """Add self.money_score to the encrypted running total stored in
        util_files/money.txt.

        The stored value is parsed line by line, each line folded into the
        total as a decimal digit (NOTE(review): this assumes one digit per
        line — confirm the file format), decrypted, incremented and
        written back encrypted.
        """
        # Read and decode the current total; 'with' guarantees the handle
        # is closed (the write handle previously leaked).
        with open('util_files/money.txt', 'r') as file:
            all_money = 0
            for num in file:
                all_money = 10 * all_money + int(num)
        all_money = Encoder.decrypt(all_money)

        # Re-encrypt and persist the updated total.
        all_money += self.money_score
        with open('util_files/money.txt', 'w') as file:
            file.write(str(Encoder.encrypt(all_money)))
Example #12
0
def test_encoder_forward():
  """Sample test for Encoder.forward() and __init__(): an Encoder with all
  parameters set to 1 must reproduce the reference network stored in
  encoder/enc.pt on a random input."""
  enc = Encoder(hidden_dim=32, conv_layers = 5)
  # Deterministic weights. Fixed: torch.nn.init.constant was deprecated and
  # removed in favour of the in-place constant_ (identical effect).
  for param in enc.parameters():
    torch.nn.init.constant_(param, 1.)
  correct_enc = torch.load('encoder/enc.pt')
  # Renamed from 'input', which shadowed the builtin.
  x = torch.rand((64, 32, 3))
  assert (enc(x).detach().numpy() == correct_enc(x).detach().numpy()).all()
  print('Sample Test Cases Passed')
Example #13
0
    def __init__(self):
        """Robot hardware setup: six ToF distance sensors, two wheel
        encoders, two motors, and eight PID controllers (main drive,
        forward wall-follow and rear wall-follow for each side).
        """
        # Six ToF sensors, re-addressed to 0x30..0x35 and keyed by position.
        self.sensors = Sensors([pins.tof_front,pins.tof_rear,pins.tof_fleft,pins.tof_fright,pins.tof_rleft,pins.tof_rright], ["front", "rear","fleft","fright","rleft","rright"], [0x30,0x31,0x32,0x33,0x34,0x35])
        self._frontIR = self.sensors.sensors["front"]
        self._fleftIR = self.sensors.sensors["fleft"]
        self._frightIR = self.sensors.sensors["fright"]
        self._rearIR = self.sensors.sensors["rear"]
        self._rleftIR = self.sensors.sensors["rleft"]
        self._rrightIR = self.sensors.sensors["rright"]

        # Current travel direction flag.
        self.dir = True

        # Wheel encoders: channel 0 = left, channel 1 = right.
        Encoder.init(pins.encoder_pin_l1, pins.encoder_pin_l2, 0)
        Encoder.init(pins.encoder_pin_r1, pins.encoder_pin_r2, 1)

        self.left_motor = Motor("Left", pins.motor_speed_l, pins.motor_direction_l, 0)

        self.right_motor = Motor("Right", pins.motor_speed_r, pins.motor_direction_r, 0)

        # Main drive PIDs (encoder ticks), with wider output limits
        # (1.5x motor speed) than the wall-follow PIDs below.
        self.setpointL = 0
        self.inputL = 0
        self.PIDLeft = PID(self.inputL, self.setpointL, KP3, 0.002, 0.000, DIRECT, Timer)
        self.PIDLeft.set_output_limits(-1 * MOTOR_SPEED*1.5, MOTOR_SPEED*1.5)
        self.PIDLeft.set_mode(AUTOMATIC)

        self.setpointR = 0
        self.inputR = 0
        self.PIDRight = PID(self.inputR, self.setpointR, KP3, 0.002, 0.000, DIRECT, Timer)
        self.PIDRight.set_output_limits(-1 * MOTOR_SPEED*1.5, MOTOR_SPEED*1.5)
        self.PIDRight.set_mode(AUTOMATIC)

        # Forward wall-follow PIDs (fixed gains 2.0 / 0.002 / 0.0).
        self.setpointfL = 0
        self.inputfL = 0
        self.PIDfLeft = PID(self.inputfL, self.setpointfL, 2.0, 0.002, 0.000, DIRECT, Timer)
        self.PIDfLeft.set_output_limits(-1 * MOTOR_SPEED, MOTOR_SPEED)
        self.PIDfLeft.set_mode(AUTOMATIC)
        
        self.setpointfR = 0
        self.inputfR = 0
        self.PIDfRight = PID(self.inputfR, self.setpointfR, 2.0, 0.002, 0.000, DIRECT, Timer)
        self.PIDfRight.set_output_limits(-1 * MOTOR_SPEED, MOTOR_SPEED)
        self.PIDfRight.set_mode(AUTOMATIC)

        # Rear wall-follow PIDs (same gains as forward).
        self.setpointrL = 0
        self.inputrL = 0
        self.PIDrLeft = PID(self.inputrL, self.setpointrL, 2.0, 0.002, 0.000, DIRECT, Timer)
        self.PIDrLeft.set_output_limits(-1 * MOTOR_SPEED, MOTOR_SPEED)
        self.PIDrLeft.set_mode(AUTOMATIC)
        
        self.setpointrR = 0
        self.inputrR = 0
        self.PIDrRight = PID(self.inputrR, self.setpointrR, 2.0, 0.002, 0.000, DIRECT, Timer)
        self.PIDrRight.set_output_limits(-1 * MOTOR_SPEED, MOTOR_SPEED)
        self.PIDrRight.set_mode(AUTOMATIC)
Example #14
0
def Time_User_encoding_preprocessing(path, set_devide):
    """Label-encode the clickstream dataset at *path* and split it into
    training and testing sets.

    Args:
        path: dataset directory/file consumed by the preprocessing helpers.
        set_devide: fraction of samples assigned to the training set.
    Returns:
        (training_set, testing_set, cat_num, y_cat_num, null_label, user_cat)
        where null_label is the encoded id of the 'null' class.
    """
    ### init ___
    raw_data, ds_size, prefetching_list, user_cat= pre.get_withtime_files(path, FILTERING_SIZE)
    label_encoded,cat_num = Encoder.Label_list(path, FILTERING_SIZE)
    # 'null' marks steps whose next request is not prefetchable.
    prefetching_list.append('null')
    y_label_encoder, y_cat_num = Encoder.Label_encoder([prefetching_list])
    data_set = []
    training_set = []
    testing_set = []
    count = 0
    with_pref_count =0
    without_pre_count = 0
    ### encoding ___
    print('start Label_encoding!...')
    recorder =0
    for l in range(len(raw_data)):
        x=[]
        x_time = []
        x_id = []
        y=[]
        count+=1
        for i in range(len(raw_data[l])):
            # Feature streams: encoded URL, timestamp, user id.
            x.append(label_encoded[recorder])
            x_time.append([int(raw_data[l][i][2])])
            x_id.append([int(raw_data[l][i][1])])
            recorder += 1
            if i<len(raw_data[l])-1:
                # Label = next request if prefetchable, else 'null'.
                if raw_data[l][i+1][0] in prefetching_list:
                    with_pref_count += 1
                    y.append(y_label_encoder.transform([raw_data[l][i+1][0]])[0])
                else:
                    without_pre_count += 1
                    y.append(y_label_encoder.transform(['null'])[0])
        # Drop the last step of each feature stream so x aligns with y.
        data_set.append(([x[:-1],x_id[:-1],x_time[:-1]], y))
        # Renamed: this local was previously 'sum', shadowing the builtin.
        _ = Encoder.progress_bar(count, len(raw_data))
    print('Encoding Finished!!\n')
    print('NULL number: ',y_label_encoder.transform(['null'])[0])
    #----
    size = len(data_set)
    random.shuffle(data_set)
    training_set = data_set[:int(size*set_devide)]
    # NOTE(review): the [:-1] below silently drops the final sample from
    # the test split — confirm whether that is intentional.
    testing_set = data_set[int(size*set_devide):-1]
    print('BURSTY STAMP:', with_pref_count)
    print('NORMAL CONNECT:', without_pre_count)
    print('Length of training_set:',len(training_set))
    print('Length of testing_set:',len(testing_set))
    print('Number of categories:',cat_num)
    print('Number of Y categories:',y_cat_num)
    return training_set,testing_set, cat_num, y_cat_num, y_label_encoder.transform(['null'])[0],user_cat
Example #15
0
def Url2vec_encoding_stream(path, set_devide):
    """Encode the clickstream at *path* with a pretrained url2vec model and
    split it into training and testing sets.

    Unlike the label-encoding variant, each sample additionally carries the
    time-unit stamp of its first event.

    Returns:
        (training_set, testing_set, cat_num, y_cat_num, null_label, user_cat)
        where null_label is the encoded id of the 'null' class.
    """
    raw_data, ds_size, prefetching_list, user_cat= pre.get_withtime_files_stream(path, FILTERING_SIZE)
    # Pretrained url2vec embedding lookup.
    url2vec_encoder = u2v.Url2vec_Model()
    url2vec_transformer = url2vec_encoder.reload('./100_15_MyModel')
    label_encoded,cat_num = Encoder.Label_list(path, FILTERING_SIZE)
    # 'null' marks steps whose next request is not prefetchable.
    prefetching_list.append('null')
    y_label_encoder, y_cat_num = Encoder.Label_encoder([prefetching_list])
    data_set = []
    training_set = []
    testing_set = []
    count = 0
    with_pref_count =0
    without_pre_count = 0
    ### encoding ___
    for l in range(len(raw_data)):
        x=[]
        x_time = []
        x_id = []
        time_unit_stamp = []
        y=[]
        count+=1
        for i in range(len(raw_data[l])):
            # Feature streams: url2vec embedding, timestamp, user id, unit stamp.
            x.append(list(url2vec_transformer[raw_data[l][i][0]]))
            x_time.append([int(raw_data[l][i][2])])
            x_id.append([int(raw_data[l][i][1])])
            time_unit_stamp.append([int(raw_data[l][i][3])])
            if i<len(raw_data[l])-1:
                # Label = next request if prefetchable, else 'null'.
                if raw_data[l][i+1][0] in prefetching_list:
                    with_pref_count += 1
                    y.append(y_label_encoder.transform([raw_data[l][i+1][0]])[0])
                else:
                    without_pre_count += 1
                    y.append(y_label_encoder.transform(['null'])[0])
        # Keep the first event's unit stamp alongside the aligned features.
        data_set.append((time_unit_stamp[0][0],[x[:-1],x_id[:-1],x_time[:-1]], y))
        # Renamed: this local was previously 'sum', shadowing the builtin.
        _ = Encoder.progress_bar(count, len(raw_data))
    print('Encoding Finished!!\n')
    print('NULL number: ',y_label_encoder.transform(['null'])[0])
    #----
    size = len(data_set)
    random.shuffle(data_set)
    training_set = data_set[:int(size*set_devide)]
    # NOTE(review): [:-1] drops the final sample from the test split —
    # confirm whether that is intentional.
    testing_set = data_set[int(size*set_devide):-1]
    print('BURSTY STAMP:', with_pref_count)
    print('NORMAL CONNECT:', without_pre_count)
    print('Length of training_set:',len(training_set))
    print('Length of testing_set:',len(testing_set))
    print('Number of categories:',cat_num)
    print('Number of Y categories:',y_cat_num)
    return training_set,testing_set, cat_num, y_cat_num, y_label_encoder.transform(['null'])[0],user_cat
Example #16
0
def main(argv):
    """Arithmetic-coding round-trip demo.

    Reads a text file given by -i/--ifile (or -s/--source), builds a
    symbol-probability source from it, arithmetic-encodes and decodes the
    text, and reports whether the round trip is lossless.
    """
    if len(argv) < 2:
        usage()
    try:
        # Fixed: '-s/--source' is handled in the loop below but was never
        # registered with getopt, so it raised GetoptError; it is now.
        opts, args = getopt.getopt(argv, "hi:o:s:", ["ifile=", "ofile=", "source="])
    except getopt.GetoptError:
        usage()

    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ("-i", "--ifile"):
            filein = arg
        elif opt in ("-s", "--source"):
            filein = arg

    print("Reading from file " + filein)
    result = Utils.readFromFile(filein)
    result = result.replace('\n', '').replace('\r', '')

    # Fixed: the remaining Python-2 print statements (syntax errors under
    # Python 3) were converted to the function form used elsewhere here.
    print(Utils.getTextAlphabet(result))

    print('')
    print("Source from string")
    src = Utils.getSymbolsProbs(result)
    print(src)

    txt = result
    print('')
    print("Minim. length (optimal?): ")
    k = int(Utils.computeMinimumLength(src, txt))
    print(k)

    print("Going to encode text: " + txt)
    print('')
    print("Arithmetic encoding: ")
    codeword = Encoder.arithmeticEncode(txt, src, k)
    print("Arithmetic encoding FINISHED. Codeword: " + codeword)

    print('')
    print('')
    print("Arithmetic decoding: ")
    decodedText = Encoder.arithmeticDecode(codeword, src, k, len(txt))
    print("Arithmetic decoding FINISHED. Decoded text:  " + decodedText)

    print("")
    print("Before: " + txt)
    print("Code word: " + codeword)
    print("After: " + decodedText)
    print("Is the text the same?: " + str((txt == decodedText)))
Example #17
0
def record_check(score, game_num):
    """Compare *score* against the encrypted record stored for *game_num*.

    If the new score beats the record, the record file is overwritten with
    the encrypted new score.
    Returns [previous_record, record_beaten] (bool second element).
    """
    # Read the encrypted record; 'with' guarantees the handle is closed.
    with open('util_files/record_{}.txt'.format(game_num), 'r') as file:
        last_score = ''
        for letter in file:
            last_score += str(letter)
    last_score = Encoder.decrypt(int(last_score))
    if score > last_score:
        with open('util_files/record_{}.txt'.format(game_num), 'w') as file:
            file.write(str(Encoder.encrypt(score)))
        return [last_score, True]
    else:
        return [last_score, False]
Example #18
0
    def __init__(self, nb_features, nb_encoder_cell, nb_decoder_cell, learning_rate=1e-2):
        """Assemble the TF1 seq2seq graph: input placeholder, encoder,
        decoder, and the training tensors.

        nb_features: size of each timestep's feature vector.
        nb_encoder_cell / nb_decoder_cell: cell sizes of the two halves.
        learning_rate: forwarded to _create_learning_tensors.
        """
        self.features = tf.placeholder(tf.float32, [None, None, nb_features]) # Batch, step, features

        # Batch size and step count are dynamic (resolved at run time).
        batch_shape = tf.shape(self.features)
        self.batch_size = batch_shape[0]
        self.nb_step = batch_shape[1]
        self.nb_features = nb_features

        # NOTE(review): scope names differ in capitalization ("Encoder" vs
        # "decoder"); checkpoints depend on these names, so do not rename.
        with tf.variable_scope("Encoder"):
            self.encoder = Encoder(self.features, nb_encoder_cell)

        # Decoder is seeded with the encoder's final state.
        with tf.variable_scope("decoder"):
            self.decoder = Decoder(batch_shape, nb_features, self.encoder.last_state, nb_decoder_cell)

        self._create_learning_tensors(learning_rate)
Example #19
0
    def __init__(self, input_size, word_vec_dim, hidden_size, output_size, 
                n_layers=4, dropout_p=.2):
        """Attentional seq2seq model: source/target embeddings, encoder,
        decoder, attention, concat projection and generator head.
        """
        self.input_size = input_size
        self.word_vec_dim = word_vec_dim
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        
        super(Seq2Seq, self).__init__()

        # Separate embedding tables for source and target vocabularies.
        self.emb_src = nn.Embedding(input_size, word_vec_dim)
        self.emb_dec = nn.Embedding(output_size, word_vec_dim)

        self.encoder = Encoder(word_vec_dim, hidden_size,
                               n_layers=n_layers,
                               dropout_p=dropout_p)
        
        # Fixed: keyword was misspelled 'n_lyaers', so the requested depth
        # never reached the decoder (TypeError or silent default,
        # depending on Decoder's signature).
        self.decoder = Decoder(word_vec_dim, hidden_size,
                               n_layers=n_layers,
                               dropout_p=dropout_p)
        
        self.attn = Attention(hidden_size)

        # Projects [decoder_out; context] back to hidden_size.
        self.concat = nn.Linear(hidden_size*2, hidden_size)
        self.tanh = nn.Tanh()
        self.generator = Generator(hidden_size, output_size)
Example #20
0
def get():
    """Return the membership/user/group join as JSON."""
    # Kept for parity with sibling controllers; result is currently unused.
    obj = Encoder.to_dict(request.vars)

    rows = db(
        (db.auth_membership.user_id == db.UsuarioUSB.id) & (db.auth_membership.group_id == db.auth_group.id)).select()
    # Fixed: as_json() was computed twice, once into an unused variable.
    return rows.as_json()
Example #21
0
 def __init__(self, enc_unit, dec_unit, batch_size, horizon_size, dropout_rate):
     """Seq2seq wrapper: builds matching encoder and decoder halves."""
     super(Seq2Seq, self).__init__()

     self.batch_size = batch_size

     # Both halves see horizon_size + 1 steps and share the dropout rate.
     seq_len = horizon_size + 1
     self.encoder = Encoder(enc_unit, batch_size, seq_len, dropout_rate)
     self.decoder = Decoder(dec_unit, batch_size, seq_len, dropout_rate)
Example #22
0
    def __init__(self, heizGPIO, ruehrGPIO, beeperGPIO, dreh1GPIO, dreh2GPIO,
                 pushGPIO):
        """Brewing controller setup: switched heater/stirrer sockets, a
        beeper, a rotary encoder with push button, and the database.

        All GPIO arguments are BCM/board pin numbers (scheme set elsewhere).
        """
        # Create empty lists (temperature readings, setpoints, x axis).
        self.TempList = []
        self.SollList = []
        self.xList = []

        # Counter of rows written to the CSV file
        self.counterRow = 0
        # Last temperature reading
        self.lastTemp = 0

        # Red power socket - heater
        self.RedSwitch = Switch(heizGPIO, "RedSwitch")
        # Blue power socket - stirrer
        self.BlueSwitch = Switch(ruehrGPIO, "BlueSwitch")

        # Beeper
        self.beeper = Beeper(beeperGPIO)

        # Rotary encoder on two pins; its push button fires pushButton()
        # on a falling edge with 30 ms debounce.
        self.dreh = Encoder.Encoder(dreh1GPIO, dreh2GPIO)
        GPIO.setup(pushGPIO, GPIO.IN)
        GPIO.add_event_detect(pushGPIO,
                              GPIO.FALLING,
                              callback=self.pushButton,
                              bouncetime=30)

        # Initialize the database
        self.initDB()
Example #23
0
    def __init__(self,
                 num_layers,
                 num_heads,
                 d_model,
                 dense_dim,
                 in_vocab_size,
                 tar_vocab_size,
                 input_max_position,
                 target_max_position,
                 rate=0.1):
        """Transformer: stacked encoder and decoder plus a final vocab
        projection.

        rate: dropout rate applied to both halves (default 0.1 keeps the
        previous behaviour).
        """
        super().__init__()

        # Fixed: dropout was hard-coded to 0.1 in both halves, silently
        # ignoring the 'rate' parameter.
        self.encoder = Encoder(num_layers,
                               num_heads,
                               d_model,
                               dense_dim,
                               in_vocab_size,
                               max_encoding_position=input_max_position,
                               dropout=rate)

        self.decoder = Decoder(num_layers,
                               num_heads,
                               d_model,
                               dense_dim,
                               tar_vocab_size,
                               max_encoding_position=target_max_position,
                               dropout=rate)

        # Final projection onto the target vocabulary.
        self.dense = tf.keras.layers.Dense(tar_vocab_size)
Example #24
0
File: s3g.py Project: jetty840/s3g
  def get_advanced_version(self):
    """
    Get the firmware version number of the connected machine
    @return Version number
    """
    # Query payload: command byte followed by our own s3g version.
    payload = struct.pack(
      '<BH',
      host_query_command_dict['GET_ADVANCED_VERSION'], 
      s3g_version,
    )

    response = self.writer.send_query_payload(payload)

    # '<BHHHH': response code, version, internal version, two reserved words.
    fields = Encoder.unpack_response('<BHHHH', response)
    response_code, version, internal_version, reserved_a, reserved_b = fields

    return {
      'Version' : version,
      'InternalVersion' : internal_version,
      'ReservedA'  : reserved_a,
      'ReservedB'  : reserved_b,
    }
Example #25
0
    def upload_chunked_part(self, chunk, api=None):
        """Upload one chunk of a media file as a Google Doc.

        Uses the explicitly supplied *api* when given, otherwise self.api.
        Returns the number of raw bytes in the chunk.
        """
        if not api:
            api = self.api

        # Fixed: open in binary mode — mmap operates on raw bytes, not
        # decoded text — and release the mapping when done.
        with open(chunk.path, "rb") as fd:
            mm = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
            try:
                chunk_bytes = mm[chunk.range_start:chunk.range_end]
            finally:
                mm.close()

        encoded_chunk = Encoder.encode(chunk_bytes)

        file_metadata = {
            'name': chunk.media.name + str(chunk.part),
            'mimeType': 'application/vnd.google-apps.document',
            'parents': [chunk.parent],
            'properties': {
                'part': str(chunk.part)
            }
        }

        mediaio_file = MediaIoBaseUpload(io.StringIO(encoded_chunk),
                                         mimetype='text/plain')

        # Fixed: previously always used self.api, defeating the api
        # parameter and its fallback above.
        api.upload_single_file(mediaio_file, file_metadata)

        return len(chunk_bytes)
Example #26
0
File: uds.py Project: fnzv/uds
def ext_upload_chunked_part(chunk):
    """Upload one chunk of a media file as a Google Doc using a fresh
    GoogleAPI instance (suitable for use from worker processes).

    Returns the number of raw bytes in the chunk.
    """
    api = GoogleAPI()
    #print("Chunk %s, bytes %s to %s" % (chunk.part, chunk.range_start, chunk.range_end))

    # Fixed: open in binary mode — mmap operates on raw bytes, not decoded
    # text — and release the mapping when done.
    with open(chunk.path, "rb") as fd:
        mm = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            chunk_bytes = mm[chunk.range_start:chunk.range_end]
        finally:
            mm.close()

    encoded_chunk = Encoder.encode(chunk_bytes)

    file_metadata = {
        'name': chunk.media.name + str(chunk.part),
        'mimeType': 'application/vnd.google-apps.document',
        'parents': [chunk.parent],
        'properties': {
            'part': str(chunk.part)
        }
    }

    mediaio_file = MediaIoBaseUpload(io.StringIO(encoded_chunk),
                                     mimetype='text/plain')

    api.upload_single_file(mediaio_file, file_metadata)

    return len(chunk_bytes)
Example #27
0
def etapas():
    """Render the stages list view, filtered by the request variables."""
    criteria = Encoder.to_dict(request.vars)
    matching = Etapa.find(criteria)
    response.view = 'mis_pasantias/etapas.load.html'
    return dict(etapas=matching.as_list(), id="id")
Example #28
0
File: s3g.py Project: jetty840/s3g
  def queue_point_new_ext(self, position, dda_rate, relative_axes, distance, feedrate):
    """Queue a move to *position* using the new-style extended command.

    Positioning is decided axis by axis: each axis is either relative or
    absolute depending on *relative_axes*.

    @param list position: 5-dimensional target position, in steps per axis
    @param int dda_rate: steps per second along the master axis
    @param list relative_axes: axes whose coordinates are relative
    @param float distance: distance in millimeters moved in (x,y,z) space,
        or max(distance(A), distance(B)) when the (x,y,z) distance is zero
    @param float feedrate: actual feedrate, in millimeters/second
    """
    if len(position) != self.extendedPointLength:
      raise PointLengthError(len(position))

    command = host_action_command_dict['QUEUE_POINT_NEW_EXT']
    axes_bits = Encoder.encode_axes(relative_axes)
    # Feedrate travels on the wire as a 16-bit fixed-point value (x64).
    wire_feedrate = int(float(feedrate) * 64.0)

    payload = struct.pack(
      '<BiiiiiIBfh',
      command,
      position[0], position[1], position[2], position[3], position[4],
      dda_rate,
      axes_bits,
      float(distance),
      wire_feedrate,
    )

    self.writer.send_action_payload(payload)
Example #29
0
File: uds.py Project: fnzv/uds
    def build_file(self, parent_id):
        """Reassemble a UDS file from its Google Docs parts.

        Fetches each Doc in part order, base64-decodes it, and appends it
        to a local file in the downloads folder.

        Args:
            parent_id: ID of the Drive folder that contains the parts.
        """
        items = self.api.recursive_list_folder(parent_id)

        folder = self.api.get_file(parent_id)

        # Guard clause replaces the original if/else nesting.
        if not items:
            print('No parts found.')
            return

        # Part numbers arrive as strings; normalise so the sort is numeric.
        for item in items:
            item['properties']['part'] = int(item['properties']['part'])
        items.sort(key=lambda x: x['properties']['part'])

        # Context manager guarantees the file is closed even if a
        # download or decode fails mid-way (the original leaked the
        # handle on error).
        with open("%s/%s" % (get_downloads_folder(), folder['name']), "wb") as f:
            for i, item in enumerate(items):
                progress_bar("Downloading %s" % folder['name'], i, len(items))

                encoded_part = self.download_part(item['id'])

                # Decode from base64 and append to the output file.
                f.write(Encoder.decode(encoded_part))

        progress_bar("Downloaded %s" % folder['name'], 1, 1)
Example #30
0
File: s3g.py Project: jetty840/s3g
  def extended_stop(self, halt_steppers, clear_buffer):
    """Stop stepper motion and/or clear the command buffer.

    Unlike the reset/abort commands, this does not soft-reset every
    subsystem.

    @param boolean halt_steppers: when true, stop the steppers
    @param boolean clear_buffer: when true, clear the command buffer
    """
    # Pack both requests into a single flags byte.
    flags = (0x01 if halt_steppers else 0x00) | (0x02 if clear_buffer else 0x00)

    payload = struct.pack(
      '<Bb',
      host_query_command_dict['EXTENDED_STOP'],
      flags,
    )

    response = self.writer.send_query_payload(payload)

    [response_code, stop_result] = Encoder.unpack_response('<BB', response)

    if stop_result != 0:
      raise ExtendedStopError
Example #31
0
File: s3g.py Project: jetty840/s3g
  def get_communication_stats(self):
    """Query the host for tool-network traffic statistics.

    @return dict mapping statistic names to their counters
    """
    payload = struct.pack(
      '<B',
      host_query_command_dict['GET_COMMUNICATION_STATS'],
    )

    response = self.writer.send_query_payload(payload)

    fields = Encoder.unpack_response('<BLLLLL', response)

    # fields[0] is the response code; the five counters follow in order.
    stat_names = ('PacketsReceived', 'PacketsSent', 'NonResponsivePacketsSent',
                  'PacketRetries', 'NoiseBytes')
    return dict(zip(stat_names, fields[1:]))
Example #32
0
File: s3g.py Project: jetty840/s3g
  def get_build_stats(self):
    """Fetch statistics about the current print (or the last one if idle).

    @return dict with build state, elapsed hours/minutes and line number
    """
    payload = struct.pack(
      '<B',
      host_query_command_dict['GET_BUILD_STATS'],
    )

    response = self.writer.send_query_payload(payload)

    (response_code, build_state, build_hours,
     build_minutes, line_number, reserved) = Encoder.unpack_response(
        '<BBBBLL', response)

    return {
        'BuildState': build_state,
        'BuildHours': build_hours,
        'BuildMinutes': build_minutes,
        'LineNumber': line_number,
        'Reserved': reserved,
    }
Example #33
0
 def Unit_single(self):
     """Encode one model's raw data file into a coded CSV.

     Reads ``./datafiles/<Name>.<Format>``, maps the values of the
     code-eligible columns through the coding dictionary for this model,
     writes the result to ``./datafiles/codefiles/<Model_name>_code.csv``,
     and finally persists the label metadata to the database.
     """
     # NOTE(review): the empty-dict assignment is dead — dict1 is
     # immediately rebound to the imported coding table below.
     dict1 = {}
     dict1 = Encoder.import_coding(self.modelpara_dict['Model_name'])
     mname = self.modelpara_dict['Model_name']
     # Input path is assembled from the model's configured name/format.
     rfile = "./datafiles/" + self.modelpara_dict[
         'Name'] + '.' + self.modelpara_dict['Format']
     print(rfile)
     with open(rfile, 'r') as rfile:
         with open("./datafiles/codefiles/" + mname + '_code.csv',
                   'w') as wfile:
             data_reader = csv.reader(rfile)
             data_writer = csv.writer(wfile)
             first = True
             row_length = 0
             # Convert the 1-based user field index to 0-based.
             lable_num = int(
                 self.modelpara_dict['Lable_field']
             ) - 1  # columns start from 0, but the user's input starts from 1
             code_field = []
             for row in data_reader:
                 if first:
                     # Only the first row is inspected to decide which
                     # columns need encoding; later rows reuse that choice.
                     code_field = self.code_Judge(
                         row, lable_num
                     )  # judge the first line only to decide the code fields
                     print("codelist", code_field)
                     # Remember the row width for subsequent conversions.
                     self.rowlength = len(row)
                     first = False
                 row_code = self.row_ValueToCode(row, self.rowlength,
                                                 code_field, lable_num,
                                                 dict1, mname)
                 data_writer.writerow(row_code)
                 #kafkaproducer.senddata(",".join(row_code))
     self.save_lable_db()
Example #34
0
File: s3g.py Project: jetty840/s3g
  def get_next_filename(self, reset):
    """Get the next filename from the machine's SD card.

    @param boolean reset: If true, reset the file index to zero and
        return the first available filename.
    @return str: the filename reported by the machine
    @raise SDCardError: if the SD subsystem reports a failure
    """
    # Idiomatic truthiness test instead of the original `reset == True`.
    flag = 1 if reset else 0

    payload = struct.pack(
      '<Bb',
      host_query_command_dict['GET_NEXT_FILENAME'],
      flag,
    )

    response = self.writer.send_query_payload(payload)

    [response_code, sd_response_code, filename] = Encoder.unpack_response_with_string('<BB', response)

    if sd_response_code != sd_error_dict['SUCCESS']:
      raise SDCardError(sd_response_code)

    return filename
Example #35
0
File: uds.py Project: tangkv/uds
    def build_file(self, parent_id):
        """Download a uds file.

        Fetches the Docs one by one, concatenating their decoded contents
        into a local file, then verifies the result against the original
        SHA-256 recorded on the folder (when present).

        Args:
            parent_id (str): The ID of the containing folder
        """
        items = self.api.recursive_list_folder(parent_id)

        folder = self.api.get_file(parent_id)

        if not items:
            print('No parts found.')
            return

        # Part numbers arrive as strings; normalise so the sort is numeric.
        for item in items:
            item['properties']['part'] = int(item['properties']['part'])
        items.sort(key=lambda x: x['properties']['part'], reverse=False)

        out_path = "%s/%s" % (get_downloads_folder(), folder['name'])
        progress_bar_chunks = tqdm(total=len(items),
                                   unit='chunks',
                                   dynamic_ncols=True,
                                   position=0)
        progress_bar_speed = tqdm(total=len(items) * CHUNK_READ_LENGTH_BYTES,
                                  unit_scale=1,
                                  unit='B',
                                  dynamic_ncols=True,
                                  position=1)

        # Bug fix: the file must be flushed and closed BEFORE hashing —
        # the original hashed f.name while f was still open, so buffered
        # bytes could be missing from the hash.
        with open(out_path, "wb") as f:
            for item in items:
                encoded_part = self.download_part(item['id'])

                # Decode from base64 and append to the output file.
                decoded_part = Encoder.decode(encoded_part)
                progress_bar_chunks.update(1)
                progress_bar_speed.update(CHUNK_READ_LENGTH_BYTES)

                f.write(decoded_part)

        # Close the bars so they don't corrupt later terminal output.
        progress_bar_chunks.close()
        progress_bar_speed.close()

        file_hash = self.hash_file(out_path)

        original_hash = folder.get("properties").get("sha256")
        if file_hash != original_hash and original_hash is not None:
            # Bug fix: interpolate the message — it was previously passed
            # to print() alongside an un-applied argument tuple.
            print("Failed to verify hash\n"
                  "Downloaded file had hash %s compared to original %s"
                  % (file_hash[:9], original_hash[:9]))
            os.remove(out_path)

        progress_bar("Downloaded %s" % folder['name'], 1, 1)
    def __init__(self, image_embed_size, word_embed_size, rnn_hidden_size,
                 num_rnn_steps, vocab_size, latent_size, cluster_embed_size):
        """Build the encoder/decoder pair and record the hyperparameters.

        All size arguments are stored on the instance for later use; the
        encoder and decoder share the same dimensional configuration.
        """
        self.encoder = Encoder(image_embed_size, word_embed_size,
                               rnn_hidden_size, num_rnn_steps, latent_size,
                               vocab_size, cluster_embed_size)
        self.decoder = Decoder(image_embed_size, latent_size, word_embed_size,
                               rnn_hidden_size, num_rnn_steps, vocab_size,
                               cluster_embed_size)

        self.image_embed_size = image_embed_size
        # Fix: latent_size was assigned twice; keep a single assignment.
        self.latent_size = latent_size
        self.word_embed_size = word_embed_size
        self.rnn_hidden_size = rnn_hidden_size
        self.num_rnn_steps = num_rnn_steps
        self.vocab_size = vocab_size
        self.cluster_embed_size = cluster_embed_size
Example #37
0
    def count(self, options):
        """Return the number of rows matching the query built from *options*."""
        parsed = DBhandler.getQuery(options)
        where_clause = Encoder.enQuery(self, parsed["condition"])
        return self.db(where_clause).count()
Example #38
0
    def __init__(self, in_channels=3, pretrained_path=None, cfg=None):
        """Build the wrapper module with its backbone encoder.

        Falls back to a minimal default config when *cfg* is falsy.
        """
        super().__init__()
        self.pretrained_path = pretrained_path
        self.config = {'align': False} if not cfg else cfg

        # Encoder backbone wrapped in a named Sequential for checkpoints.
        backbone = Encoder(in_channels, self.pretrained_path)
        self.encoder = nn.Sequential(OrderedDict([('backbone', backbone)]))
Example #39
0
 def setUpClass(cls):
     """Create the shared classifier and encoder fixtures once per class."""
     cls.test_file = "test_encoder.csv"
     cls.dt_classifier = DecisionTreeClassifier(cls.test_file, "info gain", 1)
     categorical_columns = [
         "Gender", "Color", "Sport", "Dominant Hand", "Home State",
         "Allergy", "Food"
     ]
     cls.en = Encoder(cls.dt_classifier.data, categorical_columns)
Example #40
0
def get():
    """Return the Pasantia/Materias rows matching the request as JSON."""
    criteria = Encoder.to_dict(request.vars)
    return Pasantia.JMaterias(criteria).as_json()
Example #41
0
def get():
    """Return the Permiso rows matching the request as JSON."""
    criteria = Encoder.to_dict(request.vars)
    return Permiso.find(criteria).as_json()
Example #42
0
def get():
    """Return the Accion_Usuario rows matching the request as JSON."""
    criteria = Encoder.to_dict(request.vars)
    return Accion_Usuario.find(criteria).as_json()
 def __init__(self, config, src_embedding_num, tag_num, embedding_matrix, embedding_dim_size):
     """Assemble the encoder plus a decoder chosen by ``config.decoder``.

     Supported decoder kinds are 'crf' and 'softmax'.
     """
     super(vanilla_model, self).__init__()
     self.config = config
     self.encoder = Encoder(config, src_embedding_num, embedding_matrix, embedding_dim_size)

     decoder_kind = self.config.decoder
     if decoder_kind == 'crf':
         self.decoder = CRF_decoder(config.d_model, tag_num)
     elif decoder_kind == 'softmax':
         self.decoder = Softmax_decoder(config.d_model, tag_num)
Example #44
0
File: s3g.py Project: jetty840/s3g
 def get_motor1_speed_PWM(self, tool_index):
   """Read the toolhead motor's speed as a PWM duty cycle.

   @param int tool_index: the toolhead to query
   @return int pwm: duty cycle in 0-255, where 0 is 0% and 255 is 100%
   """
   raw = self.tool_query(tool_index, slave_query_command_dict['GET_MOTOR_1_SPEED_PWM'])
   response_code, pwm = Encoder.unpack_response('<BB', raw)
   return pwm
Example #45
0
File: s3g.py Project: jetty840/s3g
 def get_motor1_speed(self, tool_index):
   """Read the toolhead motor's rotation period.

   @param int tool_index: the toolhead to query
   @return int: duration of each rotation, in milliseconds
   """
   raw = self.tool_query(tool_index, slave_query_command_dict['GET_MOTOR_1_SPEED_RPM'])
   response_code, rotation_ms = Encoder.unpack_response('<BI', raw)
   return rotation_ms
Example #46
0
File: s3g.py Project: jetty840/s3g
  def get_platform_target_temperature(self, tool_index):
    """Read the build platform's target temperature (setpoint).

    @param int tool_index: toolhead index to query
    @return int: temperature the platform is trying to reach
    """
    raw = self.tool_query(tool_index, slave_query_command_dict['GET_PLATFORM_TARGET_TEMP'])
    response_code, setpoint = Encoder.unpack_response('<BH', raw)
    return setpoint
Example #47
0
File: s3g.py Project: jetty840/s3g
  def get_platform_temperature(self, tool_index):
    """Read the build platform's current temperature.

    @param int tool_index: toolhead index to query
    @return int: temperature reported by the toolhead
    """
    raw = self.tool_query(tool_index, slave_query_command_dict['GET_PLATFORM_TEMP'])
    response_code, current_temp = Encoder.unpack_response('<BH', raw)
    return current_temp
Example #48
0
def replaceWithGrayCodeBinary(matrix, numberOfBits):
	"""Replace each digit cell of the 12x12 *matrix* with its Gray code.

	Builds the list of (numberOfBits - 2)-bit values, converts each to its
	Gray-code string, then rewrites every non-empty cell in place using the
	cell's digit as an index into that list.
	"""
	bit_values = BG.generateBitList(numberOfBits - 2)
	# Comprehension replaces the manual append loop; stray semicolon removed.
	gray_values = [BSH.charListToString(Encoder.convertToGray(v))
	               for v in bit_values]

	for i in range(12):
		for j in range(12):
			cell = matrix[i][j]
			# `is not None` instead of `!= None`; blank cells are skipped.
			if cell is not None and cell != ' ':
				matrix[i][j] = gray_values[int(cell)]
Example #49
0
    def find(self,options):
        """Select rows using the condition/order/limit parsed from *options*.

        Returns the DAL row set produced by the query.
        """
        query = DBhandler.getQuery(options)

        condition = query['condition']
        order = query['order']
        limit = query['limit']

        where = Encoder.enQuery(self,condition)
        # SECURITY NOTE(review): eval() on the order expression executes
        # arbitrary code if *options* can contain untrusted input — confirm
        # the source, or replace with a whitelist of sortable fields.
        rows = self.db(where).select(orderby=eval(order),limitby=limit)

        return rows
Example #50
0
File: s3g.py Project: jetty840/s3g
  def store_home_positions(self, axes):
    """Persist the current axis positions to EEPROM as the home position.

    @param list axes: axis names ['x', 'y', ...] whose position to save
    """
    encoded_axes = Encoder.encode_axes(axes)
    payload = struct.pack(
      '<BB',
      host_action_command_dict['STORE_HOME_POSITIONS'],
      encoded_axes,
    )

    self.writer.send_action_payload(payload)
Example #51
0
File: s3g.py Project: jetty840/s3g
  def recall_home_positions(self, axes):
    """Recall and move to the home positions stored in EEPROM.

    @param list axes: axis names ['x', 'y', ...] to recall
    """
    encoded_axes = Encoder.encode_axes(axes)
    payload = struct.pack(
      '<BB',
      host_action_command_dict['RECALL_HOME_POSITIONS'],
      encoded_axes,
    )

    self.writer.send_action_payload(payload)
Example #52
0
File: s3g.py Project: jetty840/s3g
  def is_finished(self):
    """Check whether the steppers have finished executing commands.

    @return bool: True when no stepper command is still running
    """
    payload = struct.pack('<B', host_query_command_dict['IS_FINISHED'])
    response = self.writer.send_query_payload(payload)

    response_code, finished = Encoder.unpack_response('<B?', response)
    return finished
Example #53
0
def configuracion():
	"""Render the configuration sidebar entries permitted for the user's role.

	When a user is logged in, looks up their auth group and returns the
	Accion_Usuario rows scoped to the 'configuracion' context; otherwise
	returns an empty list.
	"""
	rows = []
	# Removed two unused locals: the parsed request vars and the copy of
	# session.currentUser were never read.

	# TODO: make this filter depend on the logged-in user's role.
	if 'currentUser' in session:
		rol = db((db.auth_membership.user_id == auth.user.id)
				 & (db.auth_membership.group_id == db.auth_group.id)).select().first()
		rows = db((db.Accion_Usuario.rol == rol.auth_group.id)
				  & (db.Accion_Usuario.contexto == 'configuracion')).select()

	response.view = 'sidebar/configuracion.load.html'
	return dict(routes=rows, id="id")
Example #54
0
File: s3g.py Project: jetty840/s3g
  def get_version(self):
    """Query the connected machine for its firmware version number.

    @return int: firmware version reported by the machine
    """
    payload = struct.pack(
      '<BH',
      host_query_command_dict['GET_VERSION'],
      s3g_version,
    )

    response = self.writer.send_query_payload(payload)
    response_code, firmware_version = Encoder.unpack_response('<BH', response)
    return firmware_version
Example #55
0
File: s3g.py Project: jetty840/s3g
  def get_motherboard_status(self):
    """Retrieve motherboard status flags recorded at reset.

    @return dict: flag name -> bool.
        POWER_ERROR: an error was detected with the system power.
        HEAT_SHUTDOWN: heaters were shut down after 20+ minutes of inactivity.
    """
    payload = struct.pack(
      '<B',
      host_query_command_dict['GET_MOTHERBOARD_STATUS'],
    )

    response = self.writer.send_query_payload(payload)

    [response_code, raw_flags] = Encoder.unpack_response('<BB', response)
    bits = Encoder.decode_bitfield(raw_flags)

    return {
        'POWER_ERROR': bits[7],
        'HEAT_SHUTDOWN': bits[6],
    }
Example #56
0
File: s3g.py Project: jetty840/s3g
  def set_potentiometer_value(self, axis, value):
    """Set a digital potentiometer controlling a botstep voltage reference.

    @param axis: axis whose potentiometer should be set
    @param int value: value to program into the digital potentiometer
    """
    encoded_axis = Encoder.encode_axis(axis)
    payload = struct.pack(
      '<BBB',
      host_action_command_dict['SET_POT_VALUE'],
      encoded_axis,
      value,
    )

    self.writer.send_action_payload(payload)
Example #57
0
File: s3g.py Project: jetty840/s3g
  def get_available_buffer_size(self):
    """Query the machine's available command buffer size.

    @return int: available buffer size, in bytes
    """
    payload = struct.pack('<B', host_query_command_dict['GET_AVAILABLE_BUFFER_SIZE'])

    response = self.writer.send_query_payload(payload)
    response_code, free_bytes = Encoder.unpack_response('<BI', response)

    return free_bytes
Example #58
0
File: s3g.py Project: jetty840/s3g
  def get_build_name(self):
    """Get the name of the file currently printing, if any.

    @return str: filename of the current print
    """
    payload = struct.pack('<B', host_query_command_dict['GET_BUILD_NAME'])

    response = self.writer.send_query_payload(payload)
    response_code, build_name = Encoder.unpack_response_with_string('<B', response)

    return build_name
Example #59
0
File: s3g.py Project: jetty840/s3g
  def end_capture_to_file(self):
    """Signal the bot to stop capturing and flush commands to the SD card.

    @return int: number of bytes written to the file
    """
    payload = struct.pack('<B', host_query_command_dict['END_CAPTURE'])

    response = self.writer.send_query_payload(payload)

    response_code, bytes_written = Encoder.unpack_response('<BI', response)
    return bytes_written
Example #60
0
File: s3g.py Project: jetty840/s3g
  def get_toolhead_version(self, tool_index):
    """Query the specified toolhead for its firmware version number.

    @param int tool_index: toolhead to query
    @return int: firmware version reported by the toolhead
    """
    payload = struct.pack('<H', s3g_version)

    response = self.tool_query(tool_index, slave_query_command_dict['GET_VERSION'], payload)
    response_code, toolhead_version = Encoder.unpack_response('<BH', response)

    return toolhead_version