Example #1
def cat_post_saved(sender, **kwargs):
    cat = kwargs.get("instance")
    MEDIA_ROOT = settings.MEDIA_ROOT

    filename = os.path.basename(str(cat.avatar))
    ext = os.path.splitext(filename)[1]
    avatar_dir = os.path.join("%d" % (cat.id % 1000), "%d" % cat.id)
    new_filename = os.path.join(avatar_dir, "%d%s" % (cat.id, ext))

    filename_without_ext = os.path.splitext(filename)[0]
    if re.compile(r"^\d+$").match(filename_without_ext):
        if int(filename_without_ext) == cat.id:
            return

    try:
        os.makedirs(os.path.join(MEDIA_ROOT, "img", "avatar", avatar_dir))
    except:
        pass

    abs_origin_path = os.path.join(MEDIA_ROOT, "img", "avatar", filename)
    abs_new_path = os.path.join(MEDIA_ROOT, "img", "avatar", new_filename)

    if os.path.exists(abs_origin_path):
        if os.path.exists(abs_new_path):
            os.unlink(abs_new_path)
        try:
            os.rename(abs_origin_path, abs_new_path)
        except:
            bp()
            pass
        cat.avatar = os.path.join("img", "avatar",
                                  new_filename).replace(os.sep, "/")
        cat.save()
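# A minimal sketch (an assumption, not part of the scraped example) of how a
# receiver like cat_post_saved is typically wired up as a Django post_save
# handler; the app path and the Cat model name are hypothetical.
from django.db.models.signals import post_save
from django.dispatch import receiver

from .models import Cat  # hypothetical model

@receiver(post_save, sender=Cat)
def cat_post_saved(sender, **kwargs):
    # body as in the example above; note that the early return for an
    # already-renamed avatar keeps cat.save() from re-triggering this handler
    ...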
Example #3
def parse_datasource_file(
        file_path: Path, data_context: DataContext
) -> Optional[Dict[Any, Optional[Datasource]]]:
    datasources = {}

    if exists(file_path) and isfile(file_path):
        with open(file_path) as ds_file:
            # try:
            #     data_json = json.load(ds_file)
            # except json.JSONDecodeError as e:
            #     logging.error(
            #         f"Provided datasource file ({file_path}) is not a JSON file, parse error follows: \n{e}"
            #     )

            #     return None

            try:
                data_yml = yaml.safe_load(ds_file)  # safe_load: newer PyYAML rejects yaml.load() without a Loader
            except Exception as e:
                bp()

            for name, ds_json in data_yml.items():
                ds_spec = parse_datasource_spec(name, ds_json, None,
                                                data_context)
                datasources[name] = ds_spec

    else:

        logging.error(
            f'Provided datasource file path does not exist or is not a file: "{file_path}"'
        )
        return None

    return datasources
Example #4
def prepare_custom_data(working_directory,
                        train_enc,
                        train_dec,
                        test_enc,
                        test_dec,
                        enc_vocabulary_size,
                        dec_vocabulary_size,
                        tokenizer=None):

    # Create vocabularies of the appropriate sizes.
    enc_vocab_path = os.path.join(working_directory,
                                  "vocab%dENC.txt" % enc_vocabulary_size)
    dec_vocab_path = os.path.join(working_directory,
                                  "vocab%dDEC.txt" % dec_vocabulary_size)
    create_vocabulary(enc_vocab_path, train_enc, enc_vocabulary_size,
                      tokenizer)
    create_vocabulary(dec_vocab_path, train_dec, dec_vocabulary_size,
                      tokenizer)

    # Create token ids for the training data.
    #bp()
    enc_train_ids_path = train_enc + (".ids%d" % enc_vocabulary_size)
    dec_train_ids_path = train_dec + (".ids%d" % dec_vocabulary_size)
    data_to_token_ids(train_enc, enc_train_ids_path, enc_vocab_path, tokenizer)
    data_to_token_ids(train_dec, dec_train_ids_path, dec_vocab_path, tokenizer)

    # Create token ids for the development data.
    bp()
    enc_dev_ids_path = test_enc + (".ids%d" % enc_vocabulary_size)
    dec_dev_ids_path = test_dec + (".ids%d" % dec_vocabulary_size)
    data_to_token_ids(test_enc, enc_dev_ids_path, enc_vocab_path, tokenizer)
    data_to_token_ids(test_dec, dec_dev_ids_path, dec_vocab_path, tokenizer)

    return (enc_train_ids_path, dec_train_ids_path, enc_dev_ids_path,
            dec_dev_ids_path, enc_vocab_path, dec_vocab_path)
Example #5
def main():
    if not os.path.exists(SAVE_DIR):
        os.mkdir(SAVE_DIR)
    mask_filenames = os.listdir(SAMPLE_DIR)
    if "extract" in ARGS.tasks:
        Processor = UniformPreprocessor(IMAGE_W, IMAGE_H, "shp2gir")
        for mask_filename in mask_filenames:
            print("Processing {}".format(mask_filename))
            mask_path = os.path.abspath(os.path.join(SAMPLE_DIR, mask_filename))
            mask_name = mask_filename.split(".")[0]
            mask = cv.imread(mask_path)
            mask, _ = Processor.process_mask(mask, "B")
            processed_mask_vis_path = os.path.join(SAVE_DIR, mask_name + "_processed.png")
            cv.imwrite(processed_mask_vis_path, mask)
            keypoints, keypoints_vis = get_keypoints(mask, mask_name, NUM_KEYPOINTS)
    if "shift" in ARGS.tasks:
        keypoints_filenames = mask_filenames
        for keypoint_filename in keypoints_filenames:
            print("Processing {}".format(keypoint_filename))
            keypoint_path = os.path.abspath(os.path.join(SAMPLE_DIR, keypoint_filename))
            keypoint_name = keypoint_filename.split(".")[0]
            keypoint_vis = cv.imread(keypoint_path)
            keypoint_vis = keypoint_vis[:IMAGE_W, :IMAGE_W, 0]
            keypoints = np.argwhere(keypoint_vis > 250)
            bp()
            shift_keypoints(x, y)  # NOTE: x and y are not defined in this scope; they are presumably set by hand at the bp() prompt above
Example #6
def spamTest():
    docList = []
    classList = []
    fullText = []
    rmax = 25
    for i in range(1, 26):
        wordList = textParse(open('email/spam/%d.txt' % i, 'rb').read())
        #print('spam wordList = ',wordList)
        docList.append(wordList)
        #print('spam docList = ',docList)
        fullText.extend(wordList)
        #print('spam fullText = ',fullText)
        classList.append(1)
        #print('spam classList = ',classList)
        wordList = textParse(open('email/ham/%d.txt' % i, 'rb').read())
        #print('ham wordList = ',wordList)
        docList.append(wordList)
        #print('ham docList = ',docList)
        fullText.extend(wordList)
        #print('ham fullText = ',fullText)
        classList.append(0)
        #print('ham classList = ',classList)
    print('wordList = ', wordList)
    print('docList = ', docList)
    print('fullText = ', fullText)
    print('classList = ', classList)
    vocabList = createVocabList(docList)  #create vocabulary
    print('vocabList = ', vocabList)
    top30Words = calcMostFreq(vocabList, fullText)  #remove top 30 words
    print('top30Words = ', top30Words)
    bp()
    trainingSet = list(range(50))
    testSet = []  #create test set
    for i in range(10):
        #print('len(trainingSet) = ',len(trainingSet))
        randIndex = int(random.uniform(0, len(trainingSet)))
        #print('randIndex = ',randIndex)
        testSet.append(trainingSet[randIndex])
        del (trainingSet[randIndex])
    print('testSet = ', testSet)
    print('trainingSet = ', trainingSet)
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:  #train the classifier (get probs) trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        #print('trainMat = ',trainMat)
        trainClasses.append(classList[docIndex])
        #print('trainClasses = ',trainClasses)
    print('trainMat = ', trainMat)
    print('trainClasses = ', trainClasses)
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:  #classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V,
                      pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error", docList[docIndex])
    print('the error rate is: ', float(errorCount) / len(testSet))
Example #7
def mqtt_bridge_node():
    # init node
    rospy.init_node('mqtt_bridge_node')

    IoT_protocol_name = "x-amzn-mqtt-ca"
    aws_iot_endpoint = "a33ymm5qqy1bxl.iot.us-east-2.amazonaws.com"  # <random>.iot.<region>.amazonaws.com
    url = "https://{}".format(aws_iot_endpoint)

    ca = "/home/nvidia/Downloads/certs/icstx22/Amazon_Root_CA_1.pem"
    cert = "/home/nvidia/Downloads/certs/icstx22/c9faf68aac-certificate.pem.crt"
    private = "/home/nvidia//Downloads/certs/icstx22/c9faf68aac-private.pem.key"

    # load parameters
    params = rospy.get_param("~", {})
    mqtt_params = params.pop("mqtt", {})
    conn_params = mqtt_params.pop("connection")
    mqtt_private_path = mqtt_params.pop("private_path", "")
    bridge_params = params.get("bridge", [])

    # create mqtt client
    # mqtt_client_factory_name = rospy.get_param(
    #     "~mqtt_client_factory", ".mqtt_client:default_mqtt_client_factory")
    # mqtt_client_factory = lookup_object(mqtt_client_factory_name)
    ssl_context = ssl.create_default_context()
    ssl_context.set_alpn_protocols([IoT_protocol_name])
    ssl_context.load_cert_chain(certfile=cert, keyfile=private)
    ssl_context.load_verify_locations(cafile=ca)

    # load serializer and deserializer
    serializer = params.get('serializer', 'json:dumps')
    deserializer = params.get('deserializer', 'json:loads')

    # dependency injection
    # config = create_config(
    #     mqtt_client, serializer, deserializer, mqtt_private_path)
    # inject.configure(config)

    # configure and connect to MQTT broker
    mqtt_client = mqtt.Client()
    mqtt_client.tls_set_context(context=ssl_context)
    mqtt_client.on_connect = _on_connect
    mqtt_client.on_disconnect = _on_disconnect
    aws_iot_endpoint = "a33ymm5qqy1bxl.iot.us-east-2.amazonaws.com"  # <random>.iot.<region>.amazonaws.com
    bp()
    mqtt_client.connect(aws_iot_endpoint, port=443)

    # configure bridges
    bridges = []
    for bridge_args in bridge_params:
        bridges.append(create_bridge(**bridge_args))

    # start MQTT loop
    mqtt_client.loop_start()

    # register shutdown callback and spin
    rospy.on_shutdown(mqtt_client.disconnect)
    rospy.on_shutdown(mqtt_client.loop_stop)
    rospy.spin()
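# Sketch of the connect/disconnect callbacks the example above assigns but does
# not show (an assumption; signatures follow the paho-mqtt 1.x callback API, and
# the log messages are illustrative only).
def _on_connect(client, userdata, flags, rc):
    rospy.loginfo("MQTT connected with result code %s", rc)

def _on_disconnect(client, userdata, rc):
    rospy.loginfo("MQTT disconnected with result code %s", rc)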
Example #8
def cat_photo_saved(sender, **kwargs):
    cat_photo = kwargs.get("instance")
    MEDIA_ROOT = settings.MEDIA_ROOT

    origin_filename = cat_photo.origin.__str__()
    filename = os.path.basename(origin_filename)
    ext = os.path.splitext(filename)[1]
    photo_dir = os.path.join("%d" % (cat_photo.cat.id % 1000), "%d" % cat_photo.cat.id)
    new_filename = os.path.join("%d%s" % (cat_photo.id, ext))
    abs_photo_dir = os.path.join(MEDIA_ROOT, "img", "photo", photo_dir)

    filename_without_ext = os.path.splitext(filename)[0]
    if re.compile(r"^\d+$").match(filename_without_ext):
        if int(filename_without_ext) == cat_photo.id:
            return

    try:
        os.makedirs(abs_photo_dir)
    except:
        pass

    abs_origin_path = os.path.join(MEDIA_ROOT, origin_filename)
    abs_new_path = os.path.join(abs_photo_dir, new_filename)

    if os.path.exists(abs_origin_path):
        if os.path.exists(abs_new_path):
            os.unlink(abs_new_path)

        try:
            os.rename(abs_origin_path, abs_new_path)
        except:
            bp()
            pass
    cat_photo.origin = os.path.join("img", "photo", photo_dir, new_filename).replace(os.sep, "/")
    cat_photo.save()

    origin_bitmap = Image.open(os.path.join(abs_photo_dir, new_filename))
    w, h = origin_bitmap.size

    # list_thumbnail = origin_bitmap.copy ( )
    if w > h:
        list_thumbnail = origin_bitmap.crop(((w - h) / 2, 0, h + (w - h) / 2, h))
    else:
        list_thumbnail = origin_bitmap.crop((0, (h - w) / 2, w, w))

    print(list_thumbnail.size)

    list_thumbnail.thumbnail((220, 220))

    list_thumbnail.save(os.path.join(abs_photo_dir, "%d_%s" % (220, new_filename)))
    # w, h = get_crop_image_size ( ( w, h ) )
    # origin_bitmap = origin_bitmap.crop ( ( 0, 0, w, h ) )
    for THUMBNAIL in getattr(settings, "THUMBNAILS", (480,)):  # default needs the trailing comma to be an iterable tuple
        thumbnail_bitmap = origin_bitmap.copy()
        thumbnail_bitmap.thumbnail((THUMBNAIL, float(THUMBNAIL) / w * h))
        thumbnail_bitmap.save(os.path.join(abs_photo_dir, "%d_%s" % (THUMBNAIL, new_filename)))
Example #9
def chooseBestSplit(dataSet, leafType=regLeaf, errType=regErr, ops=(1, 4)):
    tolS = ops[0]
    tolN = ops[1]
    print("tolS ", tolS)
    print("tolN ", tolN)
    print("dataSet[:,-1 ", dataSet[:, -1])
    print("len(set(dataSet[:,-1].T.tolist()[0])) ",
          len(set(dataSet[:, -1].T.tolist()[0])))
    #if all the target variables are the same value: quit and return value
    if len(set(dataSet[:, -1].T.tolist()[0])) == 1:  #exit cond 1
        return None, leafType(dataSet)
    m, n = shape(dataSet)
    #the choice of the best feature is driven by Reduction in RSS error from mean
    S = errType(dataSet)
    print("S ", S)
    bestS = inf
    bestIndex = 0
    bestValue = 0

    for featIndex in range(n - 1):
        print("featIndex ", featIndex)
        for splitVal in set(dataSet[:, featIndex].flatten().tolist()[0]):
            print("splitVal ", splitVal)
            mat0, mat1 = binSplitDataSet(dataSet, featIndex, splitVal)
            print("mat0 ", mat0)
            print("mat1 ", mat1)
            print("shape(mat0)[0] ", shape(mat0)[0])
            print("shape(mat1)[0] ", shape(mat1)[0])
            print("tolN ", tolN)
            bp()
            if (shape(mat0)[0] < tolN) or (shape(mat1)[0] < tolN): continue
            newS = errType(mat0) + errType(mat1)
            print("newS ", newS)
            print("bestS ", bestS)
            if newS < bestS:
                bestIndex = featIndex
                bestValue = splitVal
                bestS = newS
            print("bestIndex ", bestIndex)
            print("bestValue ", bestValue)
            print("bestS ", bestS)
            print("tolS ", tolS)

    #if the decrease (S-bestS) is less than a threshold don't do the split
    if (S - bestS) < tolS:
        return None, leafType(dataSet)  #exit cond 2
    mat0, mat1 = binSplitDataSet(dataSet, bestIndex, bestValue)
    print("-----22222222----------- ")
    print("mat0 ", mat0)
    print("mat1 ", mat1)
    print("shape(mat0)[0] ", shape(mat0)[0])
    print("shape(mat1)[0] ", shape(mat1)[0])
    print("tolN ", tolN)
    if (shape(mat0)[0] < tolN) or (shape(mat1)[0] < tolN):  #exit cond 3
        return None, leafType(dataSet)
    return bestIndex, bestValue  #returns the best feature to split on
Example #10
    def reproject2(self, target_z_dist, rewards, terminates):
        try:
            #next_distr = next_distr_v.data.cpu().numpy()

            rewards = rewards.reshape(-1)
            terminates = terminates.reshape(-1).astype(bool)
            #dones_mask = dones_mask_t.cpu().numpy().astype(np.bool)
            #batch_size = len(rewards)
            proj_distr = np.zeros((self.batch_size, self.n_atoms),
                                  dtype=np.float32)

            #pdb.set_trace()

            for atom in range(self.n_atoms):
                tz_j = np.minimum(
                    self.v_max,
                    np.maximum(
                        self.v_min, rewards +
                        (self.v_min + atom * self.delta) * self.gamma))
                b_j = (tz_j - self.v_min) / self.delta
                l = np.floor(b_j).astype(np.int64)
                u = np.ceil(b_j).astype(np.int64)
                eq_mask = (u == l).astype(bool)
                proj_distr[eq_mask, l[eq_mask]] += target_z_dist[eq_mask, atom]
                ne_mask = (u != l).astype(bool)
                proj_distr[ne_mask,
                           l[ne_mask]] += target_z_dist[ne_mask, atom] * (
                               u - b_j)[ne_mask]
                proj_distr[ne_mask,
                           u[ne_mask]] += target_z_dist[ne_mask, atom] * (
                               b_j - l)[ne_mask]

            if terminates.any():
                proj_distr[terminates] = 0.0
                tz_j = np.minimum(self.v_max,
                                  np.maximum(self.v_min, rewards[terminates]))
                b_j = (tz_j - self.v_min) / self.delta
                l = np.floor(b_j).astype(np.int64)
                u = np.ceil(b_j).astype(np.int64)
                eq_mask = (u == l).astype(bool)
                eq_dones = terminates.copy()
                eq_dones[terminates] = eq_mask
                if eq_dones.any():
                    # index l only where u == l so the fancy-index shapes match
                    proj_distr[eq_dones, l[eq_mask]] = 1.0
                ne_mask = (u != l).astype(bool)
                ne_dones = terminates.copy()
                ne_dones[terminates] = ne_mask.astype(bool)
                if ne_dones.any():
                    proj_distr[ne_dones, l[ne_mask]] = (u - b_j)[ne_mask]
                    proj_distr[ne_dones, u[ne_mask]] = (b_j - l)[ne_mask]
        except Exception as e:
            print(e)
            bp()
        return proj_distr
Example #11
def download(request, bucket, path):
    query_string = request.META.get("QUERY_STRING")
    no_operator = "" == query_string
    if no_operator:
        key = path
    else:
        operator = ImageThumbnailOperator(query_string)
        operator.process(path, join(QINIU_MEDIA_ROOT, bucket))

        bp()
        key = operator.cache_key(path)

    return serve(request, key, join(QINIU_MEDIA_ROOT, bucket))
Example #12
 def replace_tiles_no_threading(self):
     total = len(self.large_image_data)-1
     for index,item in enumerate(self.large_image_data):
         if (index %10 == 0 or index == total) and self.print_progress:
             print('------{:.1%} of mosaic processed ------'.format(index/float(total)),end='\r')
         
         best_tile_index = self.get_best_fit_tile(self.small_image_data[index])
         try:
             self.large_image_data[index] = self.large_tile_data[best_tile_index]
         except:
             bp()
         if self.character_dictionary:
             self.char_matrix.append(self.character_dictionary[best_tile_index])
Example #13
    def warmup(self):

        self.ddpg.actor.eval()
        # bp()
        for i in range(5000 // args.max_steps):
            addExperienceToBuffer(self.ddpg,
                                  self.ddpg.replayBuffer,
                                  self.env,
                                  her=args.her,
                                  her_ratio=0.8)
        # bp()
        return  # early return left in place: the hand-rolled warmup loop below is unreachable

        counter = 0
        state = self.env.reset()
        episode_states = []
        episode_rewards = []
        episode_actions = []
        while counter < args.warmup:

            action = to_numpy(self.ddpg.actor(to_tensor(state.reshape(
                -1))))  #np.random.uniform(-1.0, 1.0, size=act_dim)
            next_state, reward, done, _ = self.env.step(
                np.clip(action + self.ddpg.noise.sample(), -1, 1))

            #### n-steps buffer
            episode_states.append(state)
            episode_actions.append(action)
            episode_rewards.append(reward)

            if len(episode_states) >= args.n_steps:
                cum_reward = 0.
                exp_gamma = 1
                for k in range(-args.n_steps, 0):
                    try:
                        cum_reward += exp_gamma * episode_rewards[k]
                    except:
                        bp()
                    exp_gamma *= args.gamma
                self.ddpg.replayBuffer.add(
                    episode_states[-args.n_steps].reshape(-1),
                    episode_actions[-1], cum_reward, next_state, done)
            if done:
                episode_states = []
                episode_rewards = []
                episode_actions = []
                state = self.env.reset()
            else:
                state = next_state
            counter += 1
Example #14
def main():
	#------------------------init--------------------#
	tx_fifo = []
	coded_vector = []
	lanes = [[] for y in range(NLANES)]

	cgmii_module = tx.CgmiiFSM()
	#rx_decoder_module = rx.rx_FSM()
	tx_scrambler_module = tx.Scrambler()
	rx_scrambler_module = tx.Scrambler()
	par_scrambler = ps.ParallelScrambler()
	generator = gen.ScrmGen()
	SEED = np.random.randint(0,2,58)
	#tx_scrambler_module.shift_reg = copy.deepcopy(SEED)
	#rx_scrambler_module.shift_reg = copy.deepcopy(SEED)
	#rx_scrambler = Scrambler()

	for clock in range (0,NCLOCK): # MAIN LOOP
		
		
		tx_raw = cgmii_module.tx_raw #block received from cgmii
		#encoding
		if tx_raw['block_name'] in tx.ENCODER : 
			tx_coded = tx.ENCODER[ tx_raw['block_name'] ]
		else :
			tx_coded = tx.ENCODER['ERROR_BLOCK']
		
		func = generator.scrm_func(tx_coded)
		for i in func :
			print '\n' ,i
		print '\n \n' 
		
	
		coded_vector.append(tx_coded) #for debugging only
		
		#rx_decoder_module.change_state(tx_coded) #
		
		cgmii_module.change_state(0)
		
		serial = tx_scrambler_module.tx_scrambling(tx_coded)
	
		par = par_scrambler.par_scrambling(tx_coded)
		binpar = ''.join(str(x) for x in par)
		
		print 'serial :  ', bin(serial['payload'])
		print '\n'
		print 'parallel:  ', binpar
		print '\n'
		bp()
Example #15
 def log_probs(self, actions, is_sum=True):
     # actions: (batch, coord)
     log_probs = []
     for coord in range(len(self.distributions)):
         try:
             log_probs.append(self.distributions[coord].log_probs(
                 actions[:, coord:coord + 1]))
         except:
             bp()
             log_probs.append(self.distributions[coord].log_probs(
                 actions[:, coord:coord + 1]))
     log_probs = torch.cat(log_probs, dim=1)
     if is_sum:
         return log_probs.sum(-1).unsqueeze(-1)
     else:
         return log_probs
Example #16
def probar_Tinv(ts,marca_log):
        sigma = []
        for i in N_TRANSICIONES:
                sigma.append([ts.count(i[1:])])
        bp()

        sigma = numpy.matrix(sigma)

        with open(PETRI_FILE) as fd :
                jobs = json.loads(fd.read())
                matI = numpy.matrix(jobs['petriNet']['incidence_matrix'])
                Mi   = numpy.matrix(jobs['petriNet']['init_marking'])
                Mf   = numpy.transpose((matI * sigma)) + Mi
                Mlog = numpy.matrix(marca_log)
                print('\nFinal marking computed from the invariants: \n', Mf, '\n')
                print('Final marking from the log:\n', Mlog, '\n')
                print('Comparison: ', Mlog == Mf)
Example #17
def CrimeMain():
    global Param

    ## User Case Preprocessing
    DataPath = "/Users/dueheelee/Documents/PyApp/DataMart/Springleaf/"
    TrainX, TrainY, TestsX, NClass, UniqueTerm = PreProcessing(DataPath)

    bp()

    ## Visualization
    PreVisualization(TrainX, TrainY, NClass, UniqueTerm)

    ## User Parameter Calling
    Param = ParameterSetting(NClass)

    ## Cross Validation Ticket Generation
    FunRMSE, SavePredict = CrossVal(Param, TrainX, TrainY)

    ## Select the Ensemble Function and Weights
    TrainOutput, Weight, BestGroup, BestMachine = Ensemble(
        Param, FunRMSE, SavePredict, TrainY)

    ## The First Final Training
    SaveForecast = FinalTraining(Param, TrainX, TrainY, TestsX, Weight,
                                 BestGroup, BestMachine)

    ## PostProcessing and Printing
    PostPrinting(SaveForecast, UniqueTerm, 'Sub1.csv')

    ## Preparing Second Race
    NewTrainX, NewTestsX = SecondWave(TrainOutput, SaveForecast, TrainX,
                                      TestsX)

    ## The Second Cross Validation
    FunRMSE2, SavePredict2 = CrossVal(Param, NewTrainX, TrainY)

    ## Ensemble Prediction
    TrainOutput2, Weight2, BestGroup2, BestMachine2 = Ensemble(
        Param, FunRMSE2, SavePredict2, TrainY)

    ## The Second Final Training
    SaveForecast2 = FinalTraining(Param, NewTrainX, TrainY, NewTestsX, Weight2,
                                  BestGroup2, BestMachine2)

    ## PostProcessing and Printing
    PostPrinting(SaveForecast2, UniqueTerm, 'Sub2.csv')
Example #18
def run():
    train_image_batch, train_label_batch, test_image, test_label = load_images()
    init_op = tf.group(
        tf.local_variables_initializer(),
        tf.global_variables_initializer(),
    )

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(
            sess=sess,
            coord=coord,
        )

        bp()
        pass
Example #19
def main():

    #Init
    #Simulation variables
    sh_index = 65  #with 64 it locks in 64 clock cycles
    break_flag = False
    #sh_cnt_limit   = 64 fsm parameter
    #sh_invld_limit = 32 fsm parameter
    Block_Sync_Module = bs.BlockSyncModule(0)
    Block_Sync_Module.reset()
    random_change = random.randint(70, 233)

    #Files
    block_sync_input = open("block-sync-input.txt", "w")
    block_sync_output = open("block-sync-output.txt", "w")
    block_lock_flag = open("block-lock-flag.txt", "w")

    #main loop
    for clock in range(NCLOCK):

        in_block = gen_block(sh_index)
        in_block = break_sh(in_block, break_flag, sh_index)
        Block_Sync_Module.receive_block(in_block)

        for i in range(5):
            Block_Sync_Module.FSM_change_state()

        out_block = Block_Sync_Module.get_block()

        bin_input = block_to_bin(in_block)
        bin_output = block_to_bin(out_block)
        bin_flag = bin(Block_Sync_Module.block_lock)[2:]

        #format
        bin_input = ''.join(map(lambda x: x + ' ', bin_input))
        bin_output = ''.join(map(lambda x: x + ' ', bin_output))

        block_sync_input.write(bin_input + '\n')
        block_sync_output.write(bin_output + '\n')
        block_lock_flag.write(bin_flag + '\n')

        if ((clock % random_change) == 0):
            sh_index -= 1
        if (sh_index < 0):
            bp()
            break
Example #20
def getAllCardsInfo():
    board_name = 'Liceo digital'

    all_boards = client.list_boards()
    alldata = getBoardData(board_name, 'CMMEdu', client, '2001-01-01',
                           '2030-01-01')
    all_cards_names = alldata[board_name]['cards']
    batch_counter = 0
    batch_actual = []
    acciones_batch = []
    nombres_tarjetas = []

    all_cards_names = all_cards_names[10:14]
    for j in range(len(all_cards_names)):

        if batch_counter < 10:
            batch_counter += 1
            batch_actual.append(all_cards_names[j].id)

        if batch_counter == 10 or j == len(all_cards_names) - 1:
            # bp()
            acciones_batch_aux = urlRequestCard(batch_actual, 'actions')
            for x in acciones_batch_aux:
                acciones_batch.append(x['200'])
                # bp()
                nombres_tarjetas.append(all_cards_names[j].name)
            batch_counter = 0
            batch_actual = []
    all_cards = acciones_batch
    dates_of_actions_in_cards = []
    for card in all_cards:
        dates_of_this_card = []
        for action in card:
            dates_of_this_card.append({
                'date': action['date'],
                'type': action['type'],
                'id': action['id']
            })
        dates_of_this_card.sort(key=lambda x: x['date'])  # sort in place; the bare sorted() call discarded its result
        dates_of_actions_in_cards.append(dates_of_this_card)

    prettyDicts(dates_of_actions_in_cards)
    bp()

    # prettyDicts(all_cards[1])
    return all_cards
Example #21
def process_plot_label_strings(indexes, string_container):
    new_strings = []

    if isinstance(string_container, list):
        pass
    else:
        string_container = list(string_container)
    try:
        for item in string_container:
            data = np.array(item.split('_'))
            new_string = ''
            for s in data[indexes]:
                new_string += s + ' '
            new_strings.append(new_string)
    except:
        bp()
    return new_strings
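# Illustrative call (assumes numpy is imported as np and the function above is in
# scope; the label string itself is made up):
#   process_plot_label_strings(np.array([0, 2]), ['alpha_beta_gamma'])
#   -> ['alpha gamma ']   # keeps only the underscore-separated fields at those indexes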
Example #22
    def generateMatrix(self, A):

        matrix = [[0] * A for _ in range(A)]  # A x A grid of lists so single cells can be assigned below
        list_items = [i for i in range(A * A, 0, -1)]

        r, l = 0, 0
        m, n = A, A
        while r < m and l < n:

            # fill the first row in place so previously filled columns are kept
            for i in range(l, n):
                matrix[r][i] = list_items.pop()
            r += 1
            # print(list_items)

            bp()
            print(matrix)
            print('\n\n')
            # fill the last column
            for i in range(r, m):
                print(i)
                print(matrix[i][n - 1])
                matrix[i][n - 1] = list_items.pop()

            n -= 1
            print(matrix)
            print('\n\n')

            if r < m:
                # fill the last row
                for i in range(n - 1, l - 1, -1):
                    matrix[m - 1][i] = list_items.pop()
                m -= 1

            if l < n:
                # fill the first column
                for i in range(m - 1, r - 1, -1):
                    matrix[i][l] = list_items.pop()
                l += 1

        return matrix
Example #23
    def setup_solver(self):
        # Call setup_nlp to generate the NLP
        nlp_dict_out = setup_nlp.setup_nlp(self.model, self.optimizer)
        # Set options
        opts = {}
        opts["expand"] = True
        opts["ipopt.linear_solver"] = self.optimizer.linear_solver
        #NOTE: this could be passed as parameters of the optimizer class
        opts["ipopt.max_iter"] = 500
        opts["ipopt.tol"] = 1e-6
        opts["ipopt.print_level"] = 5
        # Setup the solver
        opts["print_time"] = False

        start_time = time.time()
        solver = nlpsol("solver", self.optimizer.nlp_solver,
                        nlp_dict_out['nlp_fcn'], opts)
        elapsed_time = time.time() - start_time
        bp()
        arg = {}
        # Initial condition
        arg["x0"] = nlp_dict_out['vars_init']
        # Bounds on x
        arg["lbx"] = nlp_dict_out['vars_lb']
        arg["ubx"] = nlp_dict_out['vars_ub']
        # Bounds on g
        arg["lbg"] = nlp_dict_out['lbg']
        arg["ubg"] = nlp_dict_out['ubg']
        # NLP parameters
        nu = self.model.u.size(1)
        ntv_p = self.model.tv_p.size(1)
        nk = self.optimizer.n_horizon
        parameters_setup_nlp = struct_symMX(
            [entry("uk_prev", shape=(nu)),
             entry("TV_P", shape=(ntv_p, nk))])
        param = parameters_setup_nlp(0)
        # First value of the nlp parameters
        param["uk_prev"] = self.model.ocp.u0
        param["TV_P"] = self.optimizer.tv_p_values[0]
        arg["p"] = param
        # Add new attributes to the optimizer class
        self.optimizer.solver = solver
        self.optimizer.arg = arg
        self.optimizer.nlp_dict_out = nlp_dict_out
Example #24
def conv():
    in_channels, out_channels, D = 2, 3, 2
    bp()
    coords, feats, labels = data_loader(in_channels, batch_size=1)

    # Convolution
    input = ME.SparseTensor(feats=feats, coords=coords)
    conv = ME.MinkowskiConvolution(in_channels,
                                   out_channels,
                                   kernel_size=3,
                                   stride=2,
                                   has_bias=False,
                                   dimension=D)

    output = conv(input)

    print('Input:')
    print_sparse_tensor(input)

    print('Output:')
    print_sparse_tensor(output)

    # Convolution transpose and generate new coordinates
    strided_coords, tensor_stride = get_random_coords()
    bp()

    input = ME.SparseTensor(
        feats=torch.rand(len(strided_coords), in_channels),  #
        coords=strided_coords,
        tensor_stride=tensor_stride)
    conv_tr = ME.MinkowskiConvolutionTranspose(in_channels,
                                               out_channels,
                                               kernel_size=3,
                                               stride=2,
                                               has_bias=False,
                                               dimension=D)
    output = conv_tr(input)

    print('\nInput:')
    print_sparse_tensor(input)

    print('Convolution Transpose Output:')
    print_sparse_tensor(output)
Example #25
def main():
    #--------------------init----------------------#

    #sim variables
    cgmii_module = tx.CgmiiFSM(40, 12)
    idle_del_module = iddel.IdleDeletionModule(NBLOCKS, NLANES)
    tx_encoder_input = []
    tx_raw_input = []

    #files
    idle_del_input_data_file = open("idle-deletion-input-data.txt", "w")
    idle_del_input_ctrl_file = open("idle-deletion-input-ctrl.txt", "w")
    idle_del_output_data_file = open("idle-deletion-output-data.txt", "w")
    idle_del_output_ctrl_file = open("idle-deletion-output-ctrl.txt", "w")

    #-------------simulation begin------------------#

    for clock in range(0, NCLOCK):  # MAIN LOOP

        tx_raw = cgmii_module.tx_raw  #block received from cgmii
        #tx_raw_input.append(tx_raw)
        cgmii_module.change_state(0)

        (bin_input_data, bin_input_ctrl) = tb.cgmii_block_to_bin(tx_raw)
        idle_del_module.add_block(tx_raw)

        tx_block = idle_del_module.get_block()
        (bin_output_data, bin_output_ctrl) = tb.cgmii_block_to_bin(tx_block)
        #tx_encoder_input.append(tx_block)
        print '\n\n NAME:', tx_block['block_name']

        #format
        bin_input_data = ''.join(map(lambda x: x + ' ', bin_input_data))
        bin_input_ctrl = ''.join(map(lambda x: x + ' ', bin_input_ctrl))
        bin_output_data = ''.join(map(lambda x: x + ' ', bin_output_data))
        bin_output_ctrl = ''.join(map(lambda x: x + ' ', bin_output_ctrl))
        #write
        idle_del_input_data_file.write(bin_input_data + '\n')
        idle_del_input_ctrl_file.write(bin_input_ctrl + '\n')
        idle_del_output_data_file.write(bin_output_data + '\n')
        idle_del_output_ctrl_file.write(bin_output_ctrl + '\n')

    bp()
Example #26
def main():

        with open(LOG_FILE) as fd:
                raw = fd.read()
                raw = re.findall('(?:INFO).*',raw)
                raw = list(map(lambda x : x.replace('INFO: ', ''), raw))
                jslist = []
                for item in raw :
                        jslist.append(json.loads(item))

                #build the lists of firings and markings
                transiciones = []
                marcados = []
                for js in jslist :
                        transiciones.append(js['disparo'])
                        marcados.append(js['marcado'])

                for i in N_TRANSICIONES :
                        if transiciones.count(i) < 1:
                                print('Transition ', i, ' was never fired\n')

                for i in N_TRANSICIONES :
                        print('Transition ', i, ' fired ', transiciones.count(i), ' times')

                for i in range(len(transiciones)):
                        if transiciones[i] == 'T1' :
                                transiciones[i] = 'T0'

                
                t_inv_mat = []
                t_inv_mat.append(remover_invariantes(transiciones,['T7','T0','T2','T3','T4','T15','T5','T6']))
                #t_inv_mat.append(remover_invariantes(transiciones,['T8','T10','T9','T11']))
                t_inv_mat.append(remover_invariantes(transiciones,['T13','T12','T14']))
                clean = []
                for i in t_inv_mat:
                        bp()
                        clean += i.split('T')
                for i in range(len(transiciones)) :
                        if transiciones[i] in ['T7','T0','T2','T3','T4','T15','T5','T6','T13','T12','T14'] :
                                transiciones[i] = ''

                clean += ''.join(transiciones).split('T')
                probar_Tinv(clean, marcados[-1])
Example #27
def binary_search(arr, target):
    if len(arr) < 1:
        return -1

    low = 0
    high = len(arr) - 1

    while low <= high:
        bp()
        middle = (high + low) // 2
        if arr[middle] == target:
            return middle
        else:
            if target > arr[middle]:
                low = middle + 1
            elif target < arr[middle]:
                high = middle - 1

    return -1
Example #28
def main():
    #simulation variables
    Am_Lock_Module = am.AlignMarkerLockModule(AM_PERIOD, AM_INV)
    tx_block = []
    rx_block = []

    #file variables
    am_lock_input = open("am-lock-input-file.txt", "w")
    am_lock_output = open("am-lock-output-file.txt", "w")
    am_lock_flag = open("am-lock-flag-file.txt", "w")

    for clock in range(N_CLOCK):

        block = {'block_name': 'DATA BLOCK', 'payload': 0x10000000000000000}

        block['payload'] = random.getrandbits(64)  #random payload
        block['payload'] |= 3 << 65
        # sh

        if (clock % AM_PERIOD) == 0 and clock > 0:
            block['block_name'] = 'ALIGNER'
            block['payload'] = align_marker_list[PHY_LANE_ID]

        tx_block.append(block)
        bp()
        Am_Lock_Module.receive_block(block)
        Am_Lock_Module.FSM_change_state(True, block)
        recv_block = Am_Lock_Module.get_block()
        #rx_block.append(recv_block)

        #bin convert
        bin_input_block = block_to_bin(block)
        bin_output_block = block_to_bin(recv_block)
        bin_am_flag = bin(Am_Lock_Module.am_lock)[2:]
        #format

        bin_input_block = ''.join(map(lambda x: x + ' ', bin_input_block))
        bin_output_block = ''.join(map(lambda x: x + ' ', bin_output_block))  # was joining the input block a second time

        am_lock_input.write(bin_input_block + '\n')
        am_lock_output.write(bin_output_block + '\n')
        am_lock_flag.write(bin_am_flag + '\n')
Example #29
 def transformState(self,rawAgentState,stateCollection,featuretransformargs):
     #print 'Transform state'
     agentState= np.append(rawAgentState[0:10] ,rawAgentState [10+2*(self.teamSize-1):10+3*(self.teamSize-1)])
     #print "Agent State: "+str(agentState)
     teamState=[]
     #Weird Synchronization fix
     stateCollection = [x for x in stateCollection if type(x)!=type(1)]
     for u in range(len(stateCollection)):
         try:
             if stateCollection[u][0]!=agentState[0] or stateCollection[u][1]!=agentState[1]:
                 teamState +=  stateCollection[u][0:10].tolist()
         except:
             #print stateCollection
             #print u
             #print stateCollection[u]\
             #print "Inside Bad"
             bp()
     #print "team state is as :"+str(teamState)
     opponentState= rawAgentState [10+6*(self.teamSize-1)-1:-1]
     return agentState,teamState,opponentState
Example #30
def generateRules(L,
                  supportData,
                  minConf=0.7):  #supportData is a dict coming from scanD
    bigRuleList = []
    print("----------------------------------")
    print("len(L)", len(L))
    print("L", L)
    print("supportData", supportData)
    print("minConf", minConf)
    print("----------------------------------")
    for i in range(1, len(L)):  #only get the sets with two or more items
        print("i", i)
        for freqSet in L[i]:
            print("freqSet", freqSet)
            H1 = [frozenset([item]) for item in freqSet]
            print("H1 = ", H1)
            if (i > 1):
                rulesFromConseq(freqSet, H1, supportData, bigRuleList, minConf)
            else:
                calcConf(freqSet, H1, supportData, bigRuleList, minConf)
        bp()
    return bigRuleList
Example #31
 def rhs(self, evolution_vector):
     """
     The ODE *right hand side* in ``dy / dt = f(y)``. ``y`` is a numpy vector,
     and ``f(y)`` returns a similarly sized (numpy) vector which we call ``rhs`` here:
     
     >>> ode = to_scipy(State({ Symbol("x"): Symbol("int")(Symbol("x"),0.1,1) }))
     >>> y1 = ode.y0 + ode.rhs(ode.y0) * ode.dt   # perform some euler integration step
     >>> y1
     array([0.9])
     
     Usually, you want to pass this function to some scipy integrator. See
     also :meth:`ft`.
     """
     values = self.evaluate_state(evolution_vector)
     dqdt = [
         evaluate_values(self.state[var], values)
         for var in self.vars.evolved
     ]  # gather
     if np.any(np.isnan(np.array(dqdt))):
         from pdb import set_trace as bp
         bp()
     return np.array(dqdt)
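# Sketch of handing an rhs like the one above to a SciPy integrator, using a toy
# stand-in for the real right hand side (an assumption, for illustration only).
import numpy as np
from scipy.integrate import solve_ivp

def rhs(y):
    return -0.5 * y              # toy right hand side: dy/dt = -0.5 * y

sol = solve_ivp(lambda t, y: rhs(y), t_span=(0.0, 1.0), y0=np.array([0.9]))
print(sol.y[:, -1])              # state at t = 1.0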
Example #32
def PrepareRandomWalk(ParameterJsonFile = None, diameter = 100, num_particles = 1, frames = 100, RatioDroppedFrames = 0, EstimationPrecision = 0, mass = 0, frames_per_second = 100, microns_per_pixel = 1, temp_water = 293, visc_water = 9.5e-16):
    """ configure the parameters for a randowm walk out of a JSON file, and generate 
    it in a DataFrame
    """
    
    if ParameterJsonFile != None:
        #read para file if existing
        settings = nd.handle_data.ReadJson(ParameterJsonFile)    
        
        diameter            = settings["Simulation"]["DiameterOfParticles"]
        num_particles       = settings["Simulation"]["NumberOfParticles"]
        frames              = settings["Simulation"]["NumberOfFrames"]
        RatioDroppedFrames  = settings["Simulation"]["RatioDroppedFrames"]
        EstimationPrecision = settings["Simulation"]["EstimationPrecision"]
        mass                = settings["Simulation"]["mass"]        
        
        frames_per_second   = settings["Exp"]["fps"]
        microns_per_pixel   = settings["Exp"]["Microns_per_pixel"]
        temp_water          = settings["Exp"]["Temperature"]


        solvent = settings["Exp"]["solvent"]
        
        if settings["Exp"]["Viscosity_auto"] == 1:
            visc_water = nd.handle_data.GetViscocity(temperature = temp_water, solvent = solvent)
            bp()
        else:
            visc_water = settings["Exp"]["Viscosity"]

    
    output = GenerateRandomWalk(diameter, num_particles, frames, frames_per_second,
                                RatioDroppedFrames = RatioDroppedFrames,
                                ep = EstimationPrecision, mass = mass,
                                microns_per_pixel = microns_per_pixel,
                                temp_water = temp_water, visc_water = visc_water)
            
          
    if ParameterJsonFile != None:
        # write if para file is given
        nd.handle_data.WriteJson(ParameterJsonFile, settings) 

    return output
Example #33
def write_string_body(root, depth):
    if isinstance(root, FList):
        g.nesting += 1
        for child in root:
            write_string_body(child, depth)
        g.nesting -= 1
        write_sep("\n", pos=root.pos)
    elif isinstance(root, FQuote):
        write_sep("pushf:", pos=root.pos)
        write_sep(root.func_name, pos=root.pos)
    elif isinstance(root, FCall):
        if str(root) in ["return", "return_two"]:
            write_sep("%s%s" % (root, depth), pos=root.pos)
        else:
            write_sep(str(root), pos=root.pos)
    elif isinstance(root, FStr):
        write_sep(root.str, pos=root.pos)
    elif isinstance(root, FComment):
        pass
    elif root is None:
        pass
    else:
        bp()
Example #34
def ibp_stub():
    bp()
Example #35
#### Check for a name inside a python library ####
import cv2
print([x for x in dir(cv2) if x.startswith('COLOR_')])

#### Set python breakpoint  as bp() ####
from pdb import set_trace as bp
bp()
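
#### Common pattern in the examples above: bp() inside an except block ####
# (a small sketch for illustration; parse_ints and its argument are made-up names)
from pdb import set_trace as bp

def parse_ints(values):
    try:
        return [int(v) for v in values]
    except ValueError:
        bp()    # drops into pdb with the failing frame still live for inspection
        raise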

#### Print the whole nump array ####
import numpy
import sys
numpy.set_printoptions(threshold=sys.maxsize)  # numpy.nan is no longer accepted for threshold

# Check whether other points in a list lie within 100 px of (x1, y1) / (x2, y2)
# (a complex number stands in for the 2-D offset); set condition to False once
# either point has more than one such close neighbour
condition=True
dup1 = len([line for line in img1_pts if  100>np.absolute([np.subtract(line,(x1,y1))[0]+np.subtract(line,(x1,y1))[1]*1j]) ])
dup2 = len([line for line in img2_pts if 100>np.absolute([np.subtract(line,(x2,y2))[0]+np.subtract(line,(x2,y2))[1]*1j]) ])
if (dup1>1 or dup2>1):
  condition = False
  
# Check that the file exists (os.stat raises if it does not); if missing, create an empty file
try:
  os.stat(file).st_size == 0
except:
  open(file, 'w').close()

#Open and read numerical data in file with Numpy
cf = open(file,'r')
cluster_centers_ = np.loadtxt(cf)    
cf.close()

Example #36
 def test_encode_multipart_data(self):
     test = encode_multipart_data({'bar': 42}, ('myfile', 'inputs/test.csv', open('inputs/test.csv', 'rb')))
     bp()
map = Basemap(
    projection = 'cyl',
    lon_0=-122.44576,
    lat_0=37.752303,
    llcrnrlon=-122.52469,
    llcrnrlat=37.69862,
    urcrnrlon=-122.33663,
    urcrnrlat=37.82986,
    resolution = 'h')

mapdata = np.loadtxt('sf_map_copyright_openstreetmap_contributors.txt')
plt.imshow(mapdata, cmap = plt.get_cmap('gray'), extent=[-122.52469, -122.33663, 37.69862, 37.82986])
plt.title(i)
plt.show()

bp()  # use quit() to leave ipdb, or c to continue

#%% convert variables
x1=pd.get_dummies(df[['DayOfWeek','PdDistrict','Resolution','Descript']])
#x1=pd.get_dummies(df[['DayOfWeek','PdDistrict']])

X=np.hstack((x1,df[['Hour','month','year']]))

from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
y = le.fit_transform(y) #transform original target to numbers


#%% split to training and testing data
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size = .3)