示例#1
0
    def render_video(self, filename):
        """Render an .mp4 visualisation of the grid and trajectory embeddings.

        Args:
            filename: output base name; '.mp4' is appended.

        Side effects: fits a UMAP reducer on self.grid, computes
        self.trajectory_embedding via compute_trajectory_embedding, renders
        one frame per trajectory step (presumably appended to self.images by
        render_frame -- TODO confirm) and writes '<filename>.mp4'.
        """
        print('Rendering video {0:s}.mp4'.format(filename))
        print('Computing grid embedding')
        # Use all logical cores for the UMAP projection.
        reducer = umap.UMAP(n_jobs=psutil.cpu_count(logical=True))
        grid_embedding = reducer.fit_transform(self.grid)

        print('Computing trajectory embedding')
        bar = ProgressBar(len(self.trajectory), max_width=40)
        for i, t in enumerate(self.trajectory):
            states, actions, errors = t
            self.compute_trajectory_embedding(reducer, states, actions)
            # i + 1 so the bar reaches 100% on the last step (the original
            # used i and stopped one step short).
            bar.numerator = i + 1
            print(bar)

        print('Rendering frames')
        bar = ProgressBar(len(self.trajectory), max_width=40)
        for i, t in enumerate(self.trajectory):
            states, actions, errors = t
            self.render_frame(i, grid_embedding, self.trajectory_embedding,
                              errors)
            bar.numerator = i + 1
            print(bar)

        print('Saving file {0}'.format(filename + '.mp4'))
        imageio.mimsave(filename + '.mp4', self.images, fps=5)
示例#2
0
    def load_all_data(self, begin_date, end_date):
        """Load feature/target data for every stock table in the SQLite DB
        between begin_date and end_date.

        Per-stock results are accumulated into 10 buckets (selected by
        idx // split) and then merged; the bucketing only affects progress
        reporting, not the final result.  Column labels are Korean:
        '일자' = date, '현재가' = current price.

        Returns:
            (X_data, Y_data, DATA) as numpy arrays, where DATA rows are
            [date, code, price_now, price_5_days_later].

        NOTE(review): raises ZeroDivisionError when fewer than 9 tables
        exist (split becomes 0) -- confirm the DB always has >= 9 tables.
        """
        con = sqlite3.connect('../data/stock.db')
        # One table per stock code.
        code_list = con.execute(
            "SELECT name FROM sqlite_master WHERE type='table'").fetchall()
        # Buckets start as the int 0 and are replaced by lists on first use;
        # the merge loop below uses the int sentinel to skip empty buckets.
        X_data_list, Y_data_list, DATA_list = [0] * 10, [0] * 10, [0] * 10
        idx = 0
        split = int(len(code_list) / 9)
        bar = ProgressBar(len(code_list), max_width=80)
        for code in code_list:
            data = self.load_data(code[0], begin_date, end_date)
            data = data.dropna()
            X, Y = self.make_x_y(data, code[0])
            if len(X) <= 1: continue
            code_array = [code[0]] * len(X)
            # Feature rows must line up with the date rows sliced below.
            assert len(X) == len(data.loc[29:len(data) - 6, '일자'])
            if idx % split == 0:
                # First code of a bucket: initialise the bucket lists.
                X_data_list[int(idx / split)] = list(X)
                Y_data_list[int(idx / split)] = list(Y)
                DATA_list[int(idx / split)] = np.array([
                    data.loc[29:len(data) - 6, '일자'].values.tolist(),
                    code_array, data.loc[29:len(data) - 6,
                                         '현재가'], data.loc[34:len(data), '현재가']
                ]).T.tolist()
            else:
                X_data_list[int(idx / split)].extend(X)
                Y_data_list[int(idx / split)].extend(Y)
                DATA_list[int(idx / split)].extend(
                    np.array([
                        data.loc[29:len(data) - 6, '일자'].values.tolist(),
                        code_array, data.loc[29:len(data) - 6, '현재가'],
                        data.loc[34:len(data), '현재가']
                    ]).T.tolist())
            bar.numerator += 1
            print("%s | %d" % (bar, len(X_data_list[int(idx / split)])),
                  end='\r')
            sys.stdout.flush()
            idx += 1
        print("%s" % bar)

        print("Merge splited data")
        bar = ProgressBar(10, max_width=80)
        for i in range(10):
            # Buckets still holding the initial int 0 were never filled.
            if type(X_data_list[i]) == type(1):
                continue
            if i == 0:
                X_data = X_data_list[i]
                Y_data = Y_data_list[i]
                DATA = DATA_list[i]
            else:
                X_data.extend(X_data_list[i])
                Y_data.extend(Y_data_list[i])
                DATA.extend(DATA_list[i])
            bar.numerator = i + 1
            print("%s | %d" % (bar, len(DATA)), end='\r')
            sys.stdout.flush()
        print("%s | %d" % (bar, len(DATA)))
        return np.array(X_data), np.array(Y_data), np.array(DATA)
示例#3
0
    def load_all_data(self, begin_date, end_date):
        """Load feature/target data for every stock HDF file between
        begin_date and end_date, skipping illiquid stocks.

        Stocks whose recent trading value (mean price * mean volume over the
        last ~10 rows) is below one billion KRW are skipped.  Per-stock
        results are accumulated into 10 buckets (idx // split) and then
        merged; the bucketing only affects progress reporting.

        Returns:
            (X_data, Y_data, DATA) as numpy arrays, where DATA rows are
            [date, code, price_now, price_5_days_later, next_open].

        NOTE(review): raises ZeroDivisionError when fewer than 9 HDF files
        exist (split becomes 0).
        """
        # Stock codes are the 6 characters before '.hdf' in each file name.
        code_list = glob.glob('../data/hdf/*.hdf')
        code_list = list(map(lambda x: x.split('.hdf')[0][-6:], code_list))
        # Buckets start as the int 0; replaced by lists on first use.
        X_data_list, Y_data_list, DATA_list = [0]*10, [0]*10, [0]*10
        idx = 0
        split = int(len(code_list) / 9)
        bar = ProgressBar(len(code_list), max_width=80)
        for code in code_list:
            data = self.load_data(code, begin_date, end_date)
            if data is None or len(data) == 0:
                continue
            data = data.dropna()
            len_data = len(data)
            X, Y = self.make_x_y(data, code)
            if len(X) <= 10: continue
            # Liquidity proxy: mean price * mean volume over the last rows.
            mean_velocity = int(data.loc[len_data-10:len_data,'현재가'].mean()) * int(data.loc[len_data-10:len_data, '거래량'].mean())
            # Skip stocks trading under ~1 billion KRW.  (The original
            # condition was "< 1000000000 or < 10000000"; the second disjunct
            # is subsumed by the first and therefore dead, so it was removed.)
            if mean_velocity < 1000000000: # 10억 이하면 pass
                continue
            code_array = [code] * len(X)
            # Feature rows must line up with the date rows sliced below.
            # NOTE(review): this assert uses self.predict_dist while the DATA
            # slices below hard-code offsets 6/34/30/5 -- confirm they agree
            # when predict_dist != 5.
            assert len(X) == len(data.loc[29:len(data)-self.predict_dist-1, '일자'])
            if idx%split == 0:
                # First code of a bucket: initialise the bucket lists.
                X_data_list[int(idx/split)] = list(X)
                Y_data_list[int(idx/split)] = list(Y)
                DATA_list[int(idx/split)] = np.array([data.loc[29:len(data)-6, '일자'].values.tolist(), code_array, data.loc[29:len(data)-6, '현재가'], data.loc[34:len(data), '현재가'], data.loc[30:len(data)-5, '시가']]).T.tolist()
            else:
                X_data_list[int(idx/split)].extend(X)
                Y_data_list[int(idx/split)].extend(Y)
                DATA_list[int(idx/split)].extend(np.array([data.loc[29:len(data)-6, '일자'].values.tolist(), code_array, data.loc[29:len(data)-6, '현재가'], data.loc[34:len(data), '현재가'], data.loc[30:len(data)-5, '시가']]).T.tolist())
            bar.numerator += 1
            print("%s | %d" % (bar, len(X_data_list[int(idx/split)])), end='\r')
            sys.stdout.flush()
            idx += 1
        print("%s" % bar)

        print("Merge splited data")
        bar = ProgressBar(10, max_width=80)
        for i in range(10):
            # Buckets still holding the initial int 0 were never filled.
            if type(X_data_list[i]) == type(1):
                continue
            if i == 0:
                X_data = X_data_list[i]
                Y_data = Y_data_list[i]
                DATA = DATA_list[i]
            else:
                X_data.extend(X_data_list[i])
                Y_data.extend(Y_data_list[i])
                DATA.extend(DATA_list[i])
            bar.numerator = i+1
            print("%s | %d" % (bar, len(DATA)), end='\r')
            sys.stdout.flush()
        print("%s | %d" % (bar, len(DATA)))
        return np.array(X_data), np.array(Y_data), np.array(DATA)
def get_film_reviews(root_url, urls, max_reviews_per_film=None):
    """Scrape spectator reviews for each film url into column lists.

    Args:
        root_url: site root used to build each film's review-page url.
        urls: film urls; the film id is the first number in each.
        max_reviews_per_film: forwarded to parse_film to cap scraping.

    Returns:
        defaultdict(list) keyed by 'film-url', 'rating', 'review', 'date',
        'helpful', 'unhelpful', with one entry per scraped review.
    """
    allocine_dic = defaultdict(list)
    bar = ProgressBar(len(urls), max_width=40)

    for i, url in enumerate(urls):
        # Log progress
        bar.numerator = i + 1
        print(bar, end='\r')
        sys.stdout.flush()

        film_id = re.findall(r'\d+', url)[0]
        film_url = "{root}/film/fichefilm-{film_id}/critiques/spectateurs".format(
            root=root_url, film_id=film_id)

        parse_output = parse_film(film_url, max_reviews_per_film)
        if not parse_output:
            continue

        ratings, reviews, dates, helpfuls = parse_output

        # Guard clause for the rare case of mismatched column lengths.
        if len({len(ratings), len(reviews), len(dates), len(helpfuls)}) != 1:
            print("Error: film-url: " + film_url)
            continue

        allocine_dic['film-url'].extend([film_url] * len(ratings))
        allocine_dic['rating'].extend(ratings)
        allocine_dic['review'].extend(reviews)
        allocine_dic['date'].extend(dates)
        allocine_dic['helpful'].extend(h[0] for h in helpfuls)
        allocine_dic['unhelpful'].extend(h[1] for h in helpfuls)

    return allocine_dic
示例#5
0
    def load_features(features_fns):
        """
        Load object features from an HDF5 file.

        For each file, stream (object_id, vector) pairs and bulk-update the
        matching rows of the objects table in chunks of 1000.
        NOTE(review): defined without self -- presumably decorated as a
        staticmethod outside this view; confirm.
        """
        for features_fn in features_fns:
            print("Loading {}...".format(features_fn))
            with h5py.File(features_fn, "r", libver="latest"
                           ) as f_features, database.engine.begin() as conn:
                object_ids = f_features["object_id"]
                vectors = f_features["features"]

                # UPDATE objects SET vector=:vector WHERE object_id=:_object_id
                # (bindparam names the per-row parameters filled in below).
                stmt = (models.objects.update().where(
                    models.objects.c.object_id == bindparam(
                        "_object_id")).values({"vector": bindparam("vector")}))

                bar = ProgressBar(len(object_ids), max_width=40)
                obj_iter = iter(zip(object_ids, vectors))
                while True:
                    # Next chunk of up to 1000 pairs; empty tuple => done.
                    chunk = tuple(itertools.islice(obj_iter, 1000))
                    if not chunk:
                        break
                    conn.execute(
                        stmt,
                        [{
                            "_object_id": str(object_id),
                            "vector": vector
                        } for (object_id, vector) in chunk],
                    )

                    bar.numerator += len(chunk)
                    print(bar, end="\r")
                print()
                print("Done.")
def get_film_urls(root_url, max_page=None):
    """Collect film urls from the paginated /films listing.

    Args:
        root_url: site root.
        max_page: optional cap on pages to scrape; must not exceed the
            actual page count, otherwise an empty list is returned.

    Returns:
        list of film urls gathered from each listing page.
    """
    list_url = "{root}/films".format(root=root_url)
    response = requests.get(list_url)
    soup = BeautifulSoup(response.text, 'html.parser')

    # The last <span> inside the pagination holder carries the page count.
    holder = soup.find("div", {"class": "pagination-item-holder"})
    spans = holder.find_all("span")
    page_number = int(spans[-1].text)

    if max_page:
        if max_page > page_number:
            print("Error: max_page is greater than the actual number of pages")
            return []
        page_number = max_page

    out_urls = []
    bar = ProgressBar(page_number, max_width=40)

    for page_id in range(1, page_number + 1):
        # Log progress
        bar.numerator = page_id
        print(bar, end='\r')
        sys.stdout.flush()

        # Extend the result with the urls found on this listing page.
        page_url = "{list_url}/?page={page_num}".format(list_url=list_url,
                                                        page_num=page_id)
        out_urls.extend(parse_list_page(page_url))

    return out_urls
示例#7
0
    def __call__(self):
        """Cut aligned patch pairs from every source image and its
        counterpart in self.targetPath, writing ground-truth patches to
        self.pathGTPatch and quad-Bayer-sampled patches to self.pathQBPatch.
        """
        bar = ProgressBar(len(self.sourceImages), max_width=int(50))
        counter = 0
        for IC, imgPath in enumerate(self.sourceImages):
            tarImgPath = extractFileName(imgPath)
            img = cv2.imread(imgPath)
            linImg = cv2.imread(self.targetPath + tarImgPath)
            # Crop so every patch window stays fully inside the image.
            imgTemp = img[:img.shape[0] - self.patchSize, :img.shape[1] - self.patchSize]
            # The original reused the outer loop variable `i` as the inner
            # row index, shadowing the image path; renamed to row/col.
            for row in range(0, imgTemp.shape[0], self.patchSize):
                for col in range(0, imgTemp.shape[1], self.patchSize):
                    patch = img[row:row + self.patchSize, col:col + self.patchSize, :]
                    LinRGB = linImg[row:row + self.patchSize, col:col + self.patchSize, :]
                    sampledLinRGB = quadBayerSampler(LinRGB)
                    cv2.imwrite(self.pathGTPatch + str(counter) + ".png", patch)
                    cv2.imwrite(self.pathQBPatch + str(counter) + ".png", sampledLinRGB)
                    counter += 1
            # Refresh the bar every other image to limit console spam.
            if IC % 2 == 0:
                bar.numerator = IC + 1  # IC is 0-based; +1 keeps the bar honest
                print(Fore.CYAN + "Image Processd |", bar, Fore.CYAN, end='\r')

        print("\n Patch Extracted:", counter)
示例#8
0
    def modelInference(self, testImagesPath = None, outputDir = None, resize = None, validation = None, noiseSet = None, steps = None):
        """Run the attention network on every test image at every noise level.

        Args:
            testImagesPath: overrides self.testImagesPath when given.
            outputDir: overrides self.resultDir when given.
            resize: unused in this method -- presumably consumed elsewhere;
                TODO confirm.
            validation: truthy => skip loading pretrained weights.
            noiseSet: noise levels to evaluate; defaults to self.noiseSet.
            steps: forwarded to saveModelOutput (semantics defined there).
        """
        if not validation:
            self.modelLoad()
            print("\nInferencing on pretrained weights.")
        else:
            print("Validation about to begin.")
        if not noiseSet:
            noiseSet = self.noiseSet
        if testImagesPath:
            self.testImagesPath = testImagesPath
        if outputDir:
            self.resultDir = outputDir
        

        modelInference = inference(gridSize=self.binnigFactor, inputRootDir=self.testImagesPath, outputRootDir=self.resultDir, modelName=self.modelName, validation=validation)

        testImageList = modelInference.testingSetProcessor()
        # One bar step per (image, noise level) pair.
        barVal = ProgressBar(len(testImageList) * len(noiseSet), max_width=int(50))
        imageCounter = 0
        with torch.no_grad():
            for noise in noiseSet:
                #print(noise)
                for imgPath in testImageList:
                    img = modelInference.inputForInference(imgPath, noiseLevel=noise).to(self.device)
                    output = self.attentionNet(img)
                    modelInference.saveModelOutput(output, imgPath, noise, steps)
                    imageCounter += 1
                    # Refresh the bar every other image to limit console spam.
                    if imageCounter % 2 == 0:
                        barVal.numerator = imageCounter
                        print(Fore.CYAN + "Image Processd |", barVal,Fore.CYAN, end='\r')
        print("\n")
示例#9
0
文件: run.py 项目: praeclarumjj3/OLIE
def eval(model, dataloader):
    """Evaluate model on dataloader, print the mean edit loss, and save the
    per-batch loss curve to losses/eval_loss_<lr>.png.

    NOTE(review): shadows the builtin eval(); renaming would break callers.
    """
    model.eval()

    running_loss = []
    total = len(dataloader)
    bar = ProgressBar(total, max_width=80)
    logger.info("Starting Evaluation")
    with torch.no_grad():
        for i, data in tqdm(enumerate(dataloader, 0)):
            bar.numerator = i + 1
            print(bar, end='\r')

            # Reconstruction-style objective: the input is also the target.
            inputs = data
            outputs = model(inputs)
            loss = edit_loss(outputs, inputs)

            running_loss.append(loss.item())
            sys.stdout.flush()

    avg_loss = np.mean(running_loss)
    print("Eval Loss: {}".format(avg_loss))

    # One x tick per batch; args.lr distinguishes runs in the file name.
    plt.plot(np.linspace(1, total, total).astype(int), running_loss)
    if not os.path.exists('losses/'):
        os.makedirs('losses/')
    plt.savefig('losses/eval_loss_{}.png'.format(args.lr))
def test_defined():
    """ProgressBar rendering with a known denominator.

    Every str() call advances the trailing spinner (/ - \\ |), so repeated
    asserts at the same numerator differ only in the spinner character.
    eta._NOW is monkey-patched to fixed timestamps so the eta column is
    deterministic.  Do not reorder these asserts: the spinner state is
    global to the bar instance.
    """
    progress_bar = ProgressBar(2000)

    assert '  0% (    0/2,000) [       ] eta --:-- /' == str(progress_bar)
    assert '  0% (    0/2,000) [       ] eta --:-- -' == str(progress_bar)
    assert '  0% (    0/2,000) [       ] eta --:-- \\' == str(progress_bar)

    eta._NOW = lambda: 1411868722.0
    progress_bar.numerator = 102
    assert '  5% (  102/2,000) [       ] eta --:-- |' == str(progress_bar)
    assert '  5% (  102/2,000) [       ] eta --:-- /' == str(progress_bar)

    eta._NOW = lambda: 1411868722.5
    progress_bar.numerator = 281
    assert ' 14% (  281/2,000) [       ] eta 00:05 -' == str(progress_bar)

    eta._NOW = lambda: 1411868723.0
    progress_bar.numerator = 593
    assert ' 29% (  593/2,000) [##     ] eta 00:03 \\' == str(progress_bar)

    eta._NOW = lambda: 1411868723.5
    progress_bar.numerator = 1925
    assert ' 96% (1,925/2,000) [###### ] eta 00:01 |' == str(progress_bar)

    eta._NOW = lambda: 1411868724.0
    progress_bar.numerator = 1999
    assert ' 99% (1,999/2,000) [###### ] eta 00:01 /' == str(progress_bar)

    eta._NOW = lambda: 1411868724.5
    progress_bar.numerator = 2000
    assert '100% (2,000/2,000) [#######] eta 00:00 -' == str(progress_bar)
    assert '100% (2,000/2,000) [#######] eta 00:00 \\' == str(progress_bar)
    assert '100% (2,000/2,000) [#######] eta 00:00 |' == str(progress_bar)
示例#11
0
def compute_edit_distance(edp: EditDistanceParams,
                          bitmap_cls: Type[screen.Bitmap],
                          nominal_colours: Type[colours.NominalColours]):
    """Computes edit distance matrix between all pairs of pixel strings.

    Enumerates all possible values of the masked bit representation from
    bitmap_cls (assuming it is contiguous, i.e. we enumerate all
    2**bitmap_cls.MASKED_BITS values).  These are mapped to the dot
    representation, turned into coloured pixel strings, and we compute the
    edit distance.

    The effect of this is that we precompute the effect of storing all possible
    byte values against all possible screen backgrounds (e.g. as
    influencing/influenced by neighbouring bytes).
    """

    bits = bitmap_cls.MASKED_BITS

    # Number of distinct masked-bit values to enumerate.
    bitrange = np.uint64(2**bits)

    # One flat (bitrange x bitrange) distance table per byte mask / offset.
    edit = []
    for _ in range(len(bitmap_cls.BYTE_MASKS)):
        edit.append(
            np.zeros(shape=np.uint64(bitrange * bitrange), dtype=np.uint16))

    # Matrix is symmetrical with zero diagonal so only need to compute upper
    # triangle
    bar = ProgressBar((bitrange * (bitrange - 1)) / 2, max_width=80)

    num_dots = bitmap_cls.MASKED_DOTS

    cnt = 0
    for i in range(np.uint64(bitrange)):
        for j in range(i):
            cnt += 1

            # Throttle progress output; printing every pair would dominate.
            if cnt % 10000 == 0:
                bar.numerator = cnt
                print(bar, end='\r')
                sys.stdout.flush()

            # Flat index of the (i, j) cell: high bits = i, low bits = j.
            pair = (np.uint64(i) << bits) + np.uint64(j)

            for o, ph in enumerate(bitmap_cls.PHASES):
                first_dots = bitmap_cls.to_dots(i, byte_offset=o)
                second_dots = bitmap_cls.to_dots(j, byte_offset=o)

                first_pixels = pixel_string(
                    colours.dots_to_nominal_colour_pixel_values(
                        num_dots, first_dots, nominal_colours, init_phase=ph))
                second_pixels = pixel_string(
                    colours.dots_to_nominal_colour_pixel_values(
                        num_dots, second_dots, nominal_colours, init_phase=ph))
                edit[o][pair] = edit_distance(edp,
                                              first_pixels,
                                              second_pixels,
                                              error=False)

    return edit
示例#12
0
def plot_m2_model_details(data, path, window=1000):
    """Write one 2x2 diagnostics figure per run: rewards, M2 head weights,
    prediction error, and a prediction-error histogram.

    Args:
        data: dict of arrays keyed 're', 'ri', 'm2w', 'fme', indexed
            [run, step] ('m2w' additionally [..., head]) -- shapes assumed
            from the indexing below; TODO confirm.
        path: output prefix; "<path>_<i>.png" is written per run i.
        window: smoothing window forwarded to prepare_data.
    """
    bar = ProgressBar(data['re'].shape[0], max_width=40)
    num_rows = 2
    num_cols = 2
    for i in range(data['re'].shape[0]):
        fig = plt.figure(figsize=(num_cols * 7.00, num_rows * 7.00))
        # Panel 1: external vs internal reward over time.
        ax = plt.subplot(num_rows, num_cols, 1)
        ax.set_xlabel('steps')
        ax.set_ylabel('reward')
        ax.grid()

        t = range(data['re'].shape[1])

        mu, sigma = prepare_data(data['re'][i], window)
        plot_curve(ax, mu, sigma, t, 'blue')
        mu, sigma = prepare_data(data['ri'][i], window)
        plot_curve(ax, mu, sigma, t, 'red')
        plt.legend(['external reward', 'internal reward'], loc=4)

        # Panel 2: the two M2 head weights over time.
        ax = plt.subplot(num_rows, num_cols, 2)
        ax.set_xlabel('steps')
        ax.set_ylabel('weight')
        ax.grid()

        t = range(data['re'].shape[1])

        mu, sigma = prepare_data(data['m2w'][i][:, 0], window)
        plot_curve(ax, mu, sigma, t, 'blue')
        mu, sigma = prepare_data(data['m2w'][i][:, 1], window)
        plot_curve(ax, mu, sigma, t, 'red')
        plt.legend(['curiosity reward', 'familiarity reward'], loc=4)

        # Panel 3: forward-model prediction error (log scale).
        ax = plt.subplot(num_rows, num_cols, 3)
        ax.set_xlabel('steps')
        ax.set_ylabel('error')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()

        t = range(data['fme'].shape[1])

        mu, sigma = prepare_data(data['fme'][i], window)
        plot_curve(ax, mu, sigma, t, 'green')
        plt.legend(['prediction error'], loc=1)

        # Panel 4: histogram of prediction-error magnitudes (log counts).
        ax = plt.subplot(num_rows, num_cols, 4)
        ax.set_xlabel('reward magnitude')
        ax.set_ylabel('log count')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()
        bins = np.linspace(0, 1, 50)
        ax.hist(data['fme'][i], bins, color='darkcyan')
        plt.legend(['prediction error reward'], loc=1)

        plt.savefig("{0:s}_{1:d}.png".format(path, i))
        plt.close()

        bar.numerator = i + 1
        print(bar)
示例#13
0
def plot_forward_inverse_model_details(data, path, window=1000):
    """Write one diagnostics figure per run for a forward+inverse model:
    rewards, model errors, an error histogram, and (when present) the
    short/long distance matrices.

    Args:
        data: dict of arrays keyed 're', 'ri', 'fme', 'ime' and optionally
            'sdm', 'ldm', indexed [run, step] -- shapes assumed from the
            indexing below; TODO confirm.
        path: output prefix; "<path>_<i>.png" is written per run i.
        window: smoothing window forwarded to prepare_data.
    """
    bar = ProgressBar(data['re'].shape[0], max_width=40)
    num_rows = 3
    num_cols = 2
    for i in range(data['re'].shape[0]):
        fig = plt.figure(figsize=(num_cols * 7.00, num_rows * 7.00))
        # Panel 1: external vs internal reward over time.
        ax = plt.subplot(num_rows, num_cols, 1)
        ax.set_xlabel('steps')
        ax.set_ylabel('reward')
        ax.grid()

        t = range(data['re'].shape[1])

        mu, sigma = prepare_data(data['re'][i], window)
        plot_curve(ax, mu, sigma, t, 'blue')
        mu, sigma = prepare_data(data['ri'][i], window)
        plot_curve(ax, mu, sigma, t, 'red')
        plt.legend(['external reward', 'internal reward'], loc=4)

        # Panel 3: forward vs inverse model error (log scale).
        ax = plt.subplot(num_rows, num_cols, 3)
        ax.set_xlabel('steps')
        ax.set_ylabel('error')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()

        t = range(data['fme'].shape[1])
        mu, sigma = prepare_data(data['fme'][i], window)
        plot_curve(ax, mu, sigma, t, 'green')

        t = range(data['ime'].shape[1])
        mu, sigma = prepare_data(data['ime'][i], window)
        plot_curve(ax, mu, sigma, t, 'orchid')
        plt.legend(['prediction model error', 'inverse model error'], loc=1)

        # Panel 5: histogram of prediction-error magnitudes (log counts).
        ax = plt.subplot(num_rows, num_cols, 5)
        ax.set_xlabel('reward magnitude')
        ax.set_ylabel('log count')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()
        bins = np.linspace(0, 1, 50)
        ax.hist(data['fme'][i], bins, color='darkcyan')
        plt.legend(['prediction error reward'], loc=1)

        # Panels 2 and 4: optional distance-matrix heatmaps.
        if 'sdm' in data.keys() and 'ldm' in data.keys():
            ax = plt.subplot(num_rows, num_cols, 2)
            c = ax.pcolormesh(data['sdm'][i], cmap='Reds')
            fig.colorbar(c, ax=ax)

            ax = plt.subplot(num_rows, num_cols, 4)
            c = ax.pcolormesh(data['ldm'][i], cmap='Blues')
            fig.colorbar(c, ax=ax)

        plt.savefig("{0:s}_{1:d}.png".format(path, i))
        plt.close()

        bar.numerator = i + 1
        print(bar)
示例#14
0
def styleEditor(imList, maskList, bgList, targetRoot, invertedEditor=False):
    """Blend every (image, mask, background) combination into styled images.

    For each combination the mask's inverse selects the foreground from the
    image and the mask selects the background; the OR of the two is written
    as "styleImage_<n>.png" under targetRoot.  With invertedEditor=True a
    second "invertedStyleImage_<n>.png" variant is written per combination.

    Args:
        imList, maskList, bgList: lists of image/mask/background file paths.
            (The original re-derived these from the globals imRoot/maskRoot/
            bgRoot, silently ignoring the arguments -- fixed to honour the
            parameters the signature promises.)
        targetRoot: output directory prefix for the exported images.
        invertedEditor: also export the inverted blend per combination.
    """
    # Each combination yields one image, or two in inverted mode; this
    # scales the progress-bar denominator accordingly.
    if invertedEditor == False:
        invertedEditorCounter = 1
    else:
        invertedEditorCounter = 2

    counter = 0
    bar = ProgressBar(len(imList) * len(maskList) * len(bgList) *
                      invertedEditorCounter,
                      max_width=int(10))
    for i in imList:
        for m in maskList:
            for b in bgList:

                mask = cv2.imread(m)
                bg = cv2.imread(b)
                im = cv2.imread(i)

                # Resize mask and background to the image's dimensions.
                maskResize = cv2.resize(mask, (im.shape[1], im.shape[0]))
                bgResize = cv2.resize(bg, (im.shape[1], im.shape[0]))

                # Inverted mask selects the foreground region.
                mkIverted = 255 - maskResize

                # Composite: foreground from the image, background from bg.
                blendImage = cv2.bitwise_and(im, mkIverted)
                bgBlend = cv2.bitwise_and(bgResize, maskResize)
                finalBelnding = cv2.bitwise_or(bgBlend.copy(),
                                               blendImage.copy())
                counter += 1
                # Exporting Image
                cv2.imwrite(targetRoot + "styleImage_" + str(counter) + ".png",
                            finalBelnding)

                if invertedEditor == True:
                    # Inverted variant: image masked by background then by
                    # the inverted mask.
                    invertedBelnding = cv2.bitwise_and(im, bgResize)
                    invertedBelnding = cv2.bitwise_and(invertedBelnding,
                                                       mkIverted)
                    counter += 1

                    # Exporting Image
                    cv2.imwrite(
                        targetRoot + "invertedStyleImage_" + str(counter) +
                        ".png", invertedBelnding)

                # Refresh the bar every other export to limit console spam.
                if counter % 2 == 0:
                    bar.numerator = counter
                    print(Fore.CYAN + "Processd |", bar, Fore.CYAN, end='\r')

    print("\nImage edited {}".format(counter))
示例#15
0
    def fit(self, dataset, n_epoch=100, epoch_size=5000):
        """Train for n_epoch epochs of epoch_size samples, evaluating on the
        full test split every 100 batches, writing TensorBoard summaries to
        /tmp/TF/MNIST/{train,test}, and saving a checkpoint to self.model_dir.

        NOTE(review): batch_size is read from an enclosing scope (module
        global, presumably) -- confirm it is defined before calling.
        """
        init_op = tf.global_variables_initializer()
        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of grabbing it all up front.
        config.gpu_options.allow_growth = True
        test_x, test_y, test_seq_len = dataset.get_test_data()
        # Last measured test accuracy; shown in the progress line.
        acc = 0
        with tf.Session(config=config) as sess:
            #shutil.rmtree('/tmp/TF/MNIST')
            self.sw_train = tf.summary.FileWriter('/tmp/TF/MNIST/train',
                                                  sess.graph)
            self.sw_test = tf.summary.FileWriter('/tmp/TF/MNIST/test')
            sess.run(init_op)
            for i in range(n_epoch):
                print('Epoch %d/%d' % (i + 1, n_epoch))
                bar = ProgressBar(int(epoch_size / batch_size), max_width=80)
                for j in range(int(epoch_size / batch_size)):
                    batch_x, batch_y, batch_seq_len = dataset.next_batch(
                        batch_size)
                    assert batch_x.shape[0] == batch_size and batch_y.shape[
                        0] == batch_size and batch_seq_len.shape[
                            0] == batch_size
                    summary, _, cost = sess.run(
                        [self.merged, self.train_op, self.cost],
                        feed_dict={
                            self.X: batch_x,
                            self.Y: batch_y,
                            self.Seq_len: batch_seq_len,
                            self.batch_size: batch_size
                        })
                    # Global step: batches completed across all epochs.
                    self.sw_train.add_summary(
                        summary,
                        i * int(epoch_size / batch_size) + j)
                    bar.numerator = j + 1
                    print("%s | loss: %f | test_acc: %.2f" %
                          (bar, cost, acc * 100),
                          end='\r')
                    sys.stdout.flush()
                    # Periodic evaluation on the whole test split.
                    if j % 100 == 0:
                        summary, cost, acc = sess.run(
                            [self.merged, self.cost, self.accuracy],
                            feed_dict={
                                self.X: test_x,
                                self.Y: test_y,
                                self.Seq_len: test_seq_len,
                                self.batch_size: len(test_x)
                            })
                        self.sw_test.add_summary(
                            summary,
                            i * int(epoch_size / batch_size) + j)
                print()
            if not os.path.exists(self.model_dir):
                os.makedirs(self.model_dir)
            saver = tf.train.Saver()
            save_path = saver.save(sess, '%s/model.ckpt' % self.model_dir)
            print("Model saved in file: %s" % save_path)
示例#16
0
def progress_bar():
    """Demo: animate a ProgressBar once through its full range, honouring
    the --fast and --undefined command-line options."""
    total = 5 if OPTIONS['--fast'] else 100
    bar = ProgressBar(0 if OPTIONS['--undefined'] else total)
    step = 0
    while step <= total:
        bar.numerator = step
        print(bar, end='\r')
        sys.stdout.flush()
        time.sleep(0.25)
        step += 1
    # With --undefined the denominator is 0, so completion must be forced.
    bar.force_done = True
    print(bar)  # One final, persistent line.
示例#17
0
def plot_vae_forward_model_details(data, path, window=1000):
    """Write one 4-panel diagnostics figure per run for a VAE forward model:
    rewards, prediction error, VAE loss, and an error histogram.

    Args:
        data: dict of arrays keyed 're', 'ri', 'fme', 'vl', indexed
            [run, step] -- shapes assumed from the indexing below; TODO
            confirm.
        path: output prefix; "<path>_<i>.png" is written per run i.
        window: smoothing window forwarded to prepare_data.
    """
    num_subplots = 4
    bar = ProgressBar(data['re'].shape[0], max_width=40)
    for i in range(data['re'].shape[0]):
        plt.figure(figsize=(8.00, 4 * 5.12))
        # Panel 1: external vs internal reward over time.
        ax = plt.subplot(num_subplots, 1, 1)
        ax.set_xlabel('steps')
        ax.set_ylabel('reward')
        ax.grid()

        t = range(data['re'].shape[1])

        mu, sigma = prepare_data(data['re'][i], window)
        plot_curve(ax, mu, sigma, t, 'blue')
        mu, sigma = prepare_data(data['ri'][i], window)
        plot_curve(ax, mu, sigma, t, 'red')
        plt.legend(['external reward', 'internal reward'], loc=4)

        # Panel 2: forward-model prediction error.
        ax = plt.subplot(num_subplots, 1, 2)
        ax.set_xlabel('steps')
        ax.set_ylabel('error')
        ax.grid()

        t = range(data['fme'].shape[1])

        mu, sigma = prepare_data(data['fme'][i], window)
        plot_curve(ax, mu, sigma, t, 'green')
        plt.legend(['prediction error'], loc=1)

        # Panel 3: VAE training loss.
        ax = plt.subplot(num_subplots, 1, 3)
        ax.set_xlabel('steps')
        ax.set_ylabel('loss value')
        ax.grid()

        t = range(data['vl'].shape[1])

        mu, sigma = prepare_data(data['vl'][i], window)
        plot_curve(ax, mu, sigma, t, 'orchid')
        plt.legend(['VAE loss'], loc=1)

        # Panel 4: histogram of prediction-error magnitudes (log counts).
        ax = plt.subplot(num_subplots, 1, 4)
        ax.set_xlabel('reward magnitude')
        ax.set_ylabel('log count')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()
        bins = np.linspace(0, 1, 50)
        ax.hist(data['fme'][i], bins, color='darkcyan')
        plt.legend(['prediction error reward'], loc=1)

        plt.savefig("{0:s}_{1:d}.png".format(path, i))
        plt.close()

        bar.numerator = i + 1
        print(bar)
示例#18
0
文件: tf_lstm.py 项目: zzzapzzz/PyMLT
    def fit(self, X_data, Y_data):
        """Train the LSTM on X_data/Y_data in mini-batches, resuming from a
        checkpoint under self.prev_model when present, then save the model to
        self.model_dir.

        Rows of X_data are reshaped to (batch, 30, 23): 30 time steps of 23
        features each -- assumed from the reshape below; TODO confirm against
        the network definition.
        """
        # Add an op to initialize the variables.
        init_op = tf.global_variables_initializer()
        batch_size = 64
        time_length = 30

        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of grabbing it all up front.
        config.gpu_options.allow_growth = True
        lr = 0.0005
        # Running mean of the losses sampled every 10 batches.
        loss_sum = 0
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            # Resume from the previous checkpoint when one exists.
            if os.path.exists('%s/model.ckpt.meta' % self.prev_model):
                ckpt = tf.train.get_checkpoint_state(self.prev_model)
                saver = tf.train.Saver()
                saver.restore(sess, ckpt.model_checkpoint_path)
            for i in range(self.num_epoch):
                # Exponential learning-rate decay per epoch.
                lr *= 0.9
                print("\nEpoch %d/%d is started" % (i + 1, self.num_epoch),
                      end='\n')
                bar = ProgressBar(len(X_data) / batch_size, max_width=80)
                for j in range(int(len(X_data) / batch_size) - 1):
                    X_batch = X_data[batch_size * j:batch_size *
                                     (j + 1)].reshape(batch_size, time_length,
                                                      23)
                    Y_batch = Y_data[batch_size * j:batch_size * (j + 1)]
                    _ = sess.run(self.updateModel,
                                 feed_dict={
                                     self.lr: lr,
                                     self.inData: X_batch,
                                     self.target: Y_batch,
                                     self.batch_size: 64,
                                     self.time_length: time_length
                                 })

                    # Sample the loss every 10 batches for the progress line.
                    if j % 10 == 0:
                        loss = sess.run(self.loss,
                                        feed_dict={
                                            self.lr: lr,
                                            self.inData: X_batch,
                                            self.target: Y_batch,
                                            self.batch_size: 64,
                                            self.time_length: time_length
                                        })
                        bar.numerator = j + 1
                        # Incremental mean over the sampled losses so far.
                        loss_sum = ((j / 10) * loss_sum + loss) / (j / 10 + 1)
                        print("%s | loss: %f" % (bar, loss_sum), end='\r')
                        sys.stdout.flush()

            if not os.path.exists(self.model_dir):
                os.makedirs(self.model_dir)
            saver = tf.train.Saver()
            save_path = saver.save(sess, '%s/model.ckpt' % self.model_dir)
            print("Model saved in file: %s" % save_path)
示例#19
0
文件: tf_lstm.py 项目: zzzapzzz/PyMLT
    def load_data_in_account(self):
        """Load the last 30 days of price data for every stock listed in the
        account file and build the model input matrix.

        Returns:
            (X_test, DATA): X_test is an (n, 23*30) array of scaled features;
            DATA keeps one row per surviving stock -- [code, field1, field0,
            last_price] where field1/field0 come from the account file
            (presumably name and quantity -- TODO confirm its layout).
        """
        # load code list from account
        DATA = []
        with open('../data/stocks_in_account.txt',
                  encoding='utf-8') as f_stocks:
            for line in f_stocks.readlines():
                data = line.split(',')
                # Column 6 holds the stock code with an 'A' prefix.
                DATA.append([data[6].replace('A', ''), data[1], data[0]])

        # Load per-stock history; rows failing any step are recorded in
        # idx_rm and purged from DATA afterwards.
        X_test = []
        idx_rm = []
        bar = ProgressBar(len(DATA), max_width=80)
        for idx, code in enumerate(DATA):
            bar.numerator += 1
            print("%s | %d" % (bar, len(X_test)), end='\r')
            sys.stdout.flush()

            try:
                df = pd.read_hdf('../data/hdf/%s.hdf' % code[0],
                                 'day').sort_index()
            except pd.io.sql.DatabaseError as e:
                # NOTE(review): DatabaseError is a leftover from the SQLite
                # path; read_hdf failures may raise other exceptions.
                print(e)
                idx_rm.append(idx)
                continue
            data = df.iloc[-30:, :]
            data = data.reset_index()
            # Normalise string columns: '--' => '-', strip '+'.
            for col in data.columns:
                try:
                    data.loc[:, col] = data.loc[:, col].str.replace('--', '-')
                    data.loc[:, col] = data.loc[:, col].str.replace('+', '')
                except AttributeError as e:
                    # Non-string columns have no .str accessor; leave them.
                    print(e)
            data.loc[:, 'month'] = data.loc[:, '일자'].str[4:6]
            # Append the latest price to this stock's DATA row.
            DATA[idx].append(int(data.loc[len(data) - 1, '현재가']))
            data = data.drop(['일자', '체결강도'], axis=1)
            if len(data) < 30:
                idx_rm.append(idx)
                continue
            try:
                data = self.scaler[code[0]].transform(np.array(data))
            except KeyError:
                idx_rm.append(idx)
                continue
            X_test.extend(np.array(data))
        # Delete in descending index order so earlier deletions do not shift
        # later indices.  (The original slice idx_rm[-1:0:-1] skipped the
        # first recorded index, leaving one filtered row in DATA.)
        for i in sorted(idx_rm, reverse=True):
            del DATA[i]
        X_test = np.array(X_test).reshape(-1, 23 * 30)
        print()
        return X_test, DATA
def test_defined_hour():
    """Check that the ETA field switches to H:MM:SS formatting once enough
    timing samples have accumulated."""
    bar = ProgressBar(2000)

    # Before any progress is recorded there is no ETA to display.
    assert str(bar) == '  0% (    0/2,000) [       ] eta --:-- /'

    # Each step freezes eta's clock, advances the numerator, and checks
    # the exact rendered string.
    steps = (
        (1411868722.0, 1, '  0% (    1/2,000) [       ] eta --:-- -'),
        (1411868724.0, 2, '  0% (    2/2,000) [     ] eta 1:06:36 \\'),
    )
    for now, numerator, expected in steps:
        eta._NOW = lambda now=now: now  # bind `now` early to avoid late-binding
        bar.numerator = numerator
        assert str(bar) == expected
    def modelInference(self, testImagesPath = None, outputDir = None, resize = None, validation = None, noiseSet = None, steps = None):
        """Run the two-stage network (attention net, then HDR reconstruction)
        over the test set and save each output image.

        Any argument left as None falls back to the value already stored on
        the instance; `resize` is currently unused but kept for interface
        compatibility.
        """
        if not validation:
            # Stand-alone inference: load pretrained weights first.
            self.modelLoad(cpu=False)
            print("\nInferencing on pretrained weights.")
        else:
            print("Validation about to begin.")
        if not noiseSet:
            noiseSet = self.noiseSet
        if testImagesPath:
            self.testImagesPath = testImagesPath
        if outputDir:
            self.resultDir = outputDir

        modelInference = inference(inputRootDir=self.testImagesPath, outputRootDir=self.resultDir, modelName=self.modelName, validation=validation)

        testImageList = modelInference.testingSetProcessor()
        # Only the "_medium" exposure (one of every three files) is processed,
        # so size the bar to a third of the list.  Integer division fixes the
        # original bug of passing a float denominator to ProgressBar, and
        # max(1, ...) guards against an empty test set.
        barVal = ProgressBar(max(1, len(testImageList) // 3), max_width=50)
        imageCounter = 0
        c = 0
        with torch.no_grad():
            for imgPath in testImageList:
                torch.cuda.empty_cache()
                if "_medium" in imgPath:
                    c += 1
                    device = self.device
                    # inputForInference returns the LDR tensor and its luminance
                    # map; only the LDR image is fed to the network here.
                    imgLDR, lumLDR = modelInference.inputForInference(imgPath, noiseLevel=0)
                    output = self.attentionNet(imgLDR.to(device))
                    torch.cuda.empty_cache()
                    # detach() so HDR reconstruction does not hold the
                    # attention net's graph in memory.
                    output = self.HDRRec(output.detach())
                    torch.cuda.empty_cache()
                    modelInference.saveModelOutput(output, imgPath, steps)

                    imageCounter += 1
                    # Update the bar every other image to limit console writes.
                    if imageCounter % 2 == 0:
                        barVal.numerator = imageCounter
                        print(Fore.CYAN + "Image Processd |", barVal, Fore.CYAN, end='\r')
            print(c)
示例#22
0
def report_progress(value, title='', init=False):
    """Print a one-line progress report.

    Call once with init=True (value = total) to start the clock and build
    the bar; subsequent calls with init=False advance the numerator.
    """
    global bar, start_time
    if init:
        # First call: remember the wall-clock start and size the bar.
        start_time = time.time()
        print(datetime.now().strftime('>>> Start @ %Y-%m-%d %H:%M:%S'))
        bar = ProgressBar(value, max_width=100)
    else:
        bar.numerator = value

    elapsed = time.time() - start_time
    elapsed_time = time.strftime("%H:%M:%S", time.gmtime(elapsed))
    sys.stdout.write('\r>>> {0} = {1} {2}'.format(elapsed_time, bar, title))
    sys.stdout.flush()
示例#23
0
def main():
    """Scan a POP3 mailbox and delete mails matched by to_delete(),
    stopping after MAX_DELETE marks; ask for confirmation unless --delete."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--debug', action='store_true')
    arg_parser.add_argument('--delete',
                            action='store_true',
                            help="Delete old mails, no ask.")
    options = arg_parser.parse_args()

    config = load_config()
    config.debug = options.debug

    # Choose SSL or plain POP3 per configuration, then log in.
    make_pop = POP3_SSL if config.pop3.ssl else POP3
    mailbox = make_pop(config.pop3.host)
    mailbox.user(config.pop3.user)
    mailbox.pass_(config.pop3.passwd)

    total = len(mailbox.list()[1])
    marked = 0

    bar = ProgressBar(total)

    for msg_no in range(1, total + 1):
        bar.numerator = msg_no - 1
        if not config.debug:
            print(bar, end='\r')
        message = mailbox.retr(msg_no)[1]
        if not to_delete(message, config):
            continue
        marked += 1
        if config.debug:
            print("Mark {} to be delete".format(msg_no))
        mailbox.dele(msg_no)
        if marked == MAX_DELETE:
            break

    if not options.delete:
        # dele() only stages deletions server-side; rset() cancels them all.
        answer = input("Okay to delete {} mails? (y/N) ".format(marked))
        if answer != 'y':
            mailbox.rset()

    mailbox.quit()

    if marked == MAX_DELETE:
        print(
            "There may be more mails to delete.  You may want to re-run this script."
        )
示例#24
0
def main():
    """Download every picture attachment from a user-selected VK dialog."""
    session = vk.Session(TOKEN)
    api = vk.API(session)

    user_ids_names = get_dialogs(api)

    selected_number = show_prompt(user_ids_names)
    id = user_ids_names[selected_number]["id"]
    dirname = mkdir(user_ids_names[selected_number]["name"])
    msgs = get_msgs(api, id)
    # Materialize so len() works even if get_urls returns a generator.
    urls = list(get_urls(msgs))

    # Bug fix: the bar was previously created with a denominator of 0, so it
    # could never show meaningful progress; size it to the number of URLs.
    bar = ProgressBar(len(urls), max_width=60)
    bar.numerator = 0

    with ThreadPoolExecutor(max_workers=5) as executor:
        # Consume the lazy map so download errors propagate instead of being
        # silently discarded when the executor shuts down.
        list(executor.map(lambda url: download_pics(url, dirname, bar), urls))
示例#25
0
 def write_results(self, entries_to_process):
     '''Write phishing-feed lookup results into the CSV file.

     Processes at most `entries_to_process` entries from the hpHosts feed and
     returns the counter value after the loop.
     '''
     bar = ProgressBar(entries_to_process, max_width=72)
     # Opening the CSV also writes its header row.
     csv_writer = self.open_csv_file()
     counter = 1
     for feed_entry in self.hphosts_feed.entries:
         # Honour the '-n' limit on the number of entries.
         if counter > entries_to_process:
             break
         bar.numerator = counter
         print(bar, end='\r')
         # Base details of the phishing site taken from the feed entry.
         result = {
             'Phishing Site Domain': feed_entry.title,
             'Added to hpHosts': parse(feed_entry.published),
             'Phishing Site IP Address': re.findall(
                 r'[0-9]+(?:\.[0-9]+){3}', feed_entry.summary)[0],
         }
         # Resolve the domain via each configured third-party DNS service.
         for resolver_name in self.resolver_names:
             try:
                 resolver = DnsResolver(
                     self.resolvers[resolver_name]['resolvers'])
                 ip_addresses = resolver.get_ip_address(
                     result['Phishing Site Domain'])
             except Exception as e:
                 # Record the lookup failure itself in the CSV cell.
                 result[resolver_name] = e
             else:
                 result[resolver_name] = self.generate_result(
                     ip_addresses,
                     self.resolvers[resolver_name]['blockpages'],
                     resolver_name)
         csv_writer.writerow(result)
         # Flush per row so partial results survive an interrupted run.
         self.output_file_handler.flush()
         counter += 1
     self.output_file_handler.close()
     return counter
示例#26
0
    def __call__(self):
        """Slice every source image into non-overlapping patchSize x patchSize
        patches and write them as sequentially numbered PNGs under targetPath.
        """
        bar = ProgressBar(len(self.sourceImages), max_width=50)
        counter = 0
        # Bug fix: the original inner loop reused `i` for both the image path
        # and the row offset; distinct names remove the shadowing.
        for imageIdx, imagePath in enumerate(self.sourceImages):
            img = cv2.imread(imagePath)
            # Stop patchSize short of each edge so every patch is full-sized
            # (same bounds as the original imgTemp crop, without the slice).
            rowLimit = img.shape[0] - self.patchSize
            colLimit = img.shape[1] - self.patchSize
            for y in range(0, rowLimit, self.patchSize):
                for x in range(0, colLimit, self.patchSize):
                    patch = img[y:y + self.patchSize, x:x + self.patchSize, :]
                    cv2.imwrite(self.targetPath + str(counter) + ".png", patch)
                    counter += 1
            # Refresh the bar every other image to limit console writes.
            if imageIdx % 2 == 0:
                bar.numerator = imageIdx
                print(Fore.CYAN + "Image Processd |", bar, Fore.CYAN, end='\r')

        print("\n Patch Extracted:", counter)
示例#27
0
def main():
    """Build a string of every character in a code-point range and copy it to
    the clipboard.

    No CLI args: show help and use the default range 32..255.
    Two CLI args: use them as the lower and upper code points.
    """
    argc = len(sys.argv) - 1
    if argc == 0:  # default
        doHelp()
        lo = 32
        hi = 255
    elif argc == 2:  # user defined
        lo = int(sys.argv[1])
        hi = int(sys.argv[2])
    else:
        doHelp()
        sys.exit(0)

    # Clamp to the valid Unicode code-point range.  (Renamed from min/max,
    # which shadowed the builtins.)
    if lo < 0:
        lo = 1
    if hi < 0:
        hi = 1
    if lo >= 0x110000:
        lo = 0x110000 - 1
    if hi >= 0x110000:
        hi = 0x110000 - 1

    work = hi - lo
    # Print roughly five intermediate updates; max(1, ...) fixes a
    # ZeroDivisionError when the range is a single character (work == 0).
    work_milestone = max(1, math.ceil(work / 5))
    bar = ProgressBar(work, max_width=50)

    i = 0
    chars = []  # collect then join: O(n) instead of quadratic += concatenation

    for i in range(work + 1):
        k = i + lo
        chars.append(chr(k))

        if (k % work_milestone) == 0:
            bar.numerator = i
            print(bar)
            sys.stdout.flush()

    bar.numerator = i
    print(bar)
    print("")

    pyperclip.copy("".join(chars))
    print("done")
示例#28
0
    def load_features(features_fns):
        """
        Load object feature vectors from HDF5 files and write them into the
        objects table, 1000 rows per statement.
        """
        for features_fn in features_fns:
            print("Loading {}...".format(features_fn))
            # Pull everything into memory first so the HDF5 handle is closed
            # before the database transaction starts.
            with h5py.File(features_fn, "r", libver="latest") as f_features:
                object_ids = f_features["object_id"].asstr()[:]
                vectors = f_features["features"][:]

            with database.engine.begin() as conn:
                stmt = (
                    models.objects.update()
                    .where(models.objects.c.object_id == bindparam("_object_id"))
                    .values({"vector": bindparam("vector")})
                )

                # TODO: Use UPDATE ... RETURNING to get the number of affected rows

                bar = ProgressBar(len(object_ids), max_width=40)
                pairs = iter(zip(object_ids, vectors))
                # Sentinel-iter yields 1000-row chunks until the pairs run out.
                for chunk in iter(lambda: tuple(itertools.islice(pairs, 1000)), ()):
                    conn.execute(
                        stmt,
                        [
                            {"_object_id": str(oid), "vector": vec}
                            for oid, vec in chunk
                        ],
                    )
                    bar.numerator += len(chunk)
                    print(bar, end="\r")
                print()

                # TODO: In the end, print a summary of how many objects have a feature vector now.

                print("Done.")
def main():
    """Render a colorized demo progress bar, stepping four times a second."""
    Windows.enable()  # Does nothing if not on Windows.

    # Prepare: thousands separators in the bar depend on the locale.
    if os.name == 'nt':
        locale.setlocale(locale.LC_ALL, 'english-us')
    else:
        locale.resetlocale()

    total = 5 if OPTIONS['--fast'] else 100
    progress_bar = ProgressBar(total)
    progress_bar.bar.CHAR_FULL = Color('{autoyellow}#{/autoyellow}')
    progress_bar.bar.CHAR_LEADING = Color('{autoyellow}#{/autoyellow}')
    progress_bar.bar.CHAR_LEFT_BORDER = Color('{autoblue}[{/autoblue}')
    progress_bar.bar.CHAR_RIGHT_BORDER = Color('{autoblue}]{/autoblue}')

    # Run.
    for step in range(total + 1):
        progress_bar.numerator = step
        print(progress_bar, end='\r')
        sys.stdout.flush()
        time.sleep(0.25)
    print(progress_bar)  # Always print one last time.
    def test_edit_distances_hgr(self):
        """Assert invariants and symmetries of the edit distance matrices."""

        for p in PALETTES:
            ed = screen.HGRBitmap.edit_distances(p)
            print(p)

            bar = ProgressBar((4 * 2**14 * (2**14 - 1)) / 2, max_width=80)

            cnt = 0
            for ph in range(2):

                # TODO: for HGR this invariant isn't true, all-0 and all-1
                #  values for header/footer/body with/without palette bit can
                #  also have zero difference
                # # Only zero entries should be on diagonal, i.e. of form
                # # i << 14 + i
                # zeros = np.arange(len(ed[ph]))[ed[ph] == 0]
                # for z in zeros:
                #     z1 = z & (2**14-1)
                #     z2 = (z >> 14) & (2**14-1)
                #     if z1 != z2:
                #         self.assertEqual(z1, z2)

                # Assert that matrix is symmetrical
                for i in range(2**14):
                    for j in range(i):
                        cnt += 1

                        if cnt % 10000 == 0:
                            bar.numerator = cnt
                            print(bar, end='\r')
                            sys.stdout.flush()

                        self.assertEqual(
                            ed[ph][(i << 14) + j],
                            ed[ph][(j << 14) + i],
                        )

                        # Matrix is positive definite
                        self.assertGreaterEqual(ed[ph][(i << 14) + j], 0)