Example no. 1
    def modelInference(self, testImagesPath=None, outputDir=None, resize=None, validation=None, noiseSet=None, steps=None):
        if not validation:
            self.modelLoad()
            print("\nInferencing on pretrained weights.")
        else:
            print("Validation about to begin.")
        if not noiseSet:
            noiseSet = self.noiseSet
        if testImagesPath:
            self.testImagesPath = testImagesPath
        if outputDir:
            self.resultDir = outputDir
        

        modelInference = inference(gridSize=self.binnigFactor, inputRootDir=self.testImagesPath, outputRootDir=self.resultDir, modelName=self.modelName, validation=validation)

        testImageList = modelInference.testingSetProcessor()
        barVal = ProgressBar(len(testImageList) * len(noiseSet), max_width=50)
        imageCounter = 0
        with torch.no_grad():
            for noise in noiseSet:
                #print(noise)
                for imgPath in testImageList:
                    img = modelInference.inputForInference(imgPath, noiseLevel=noise).to(self.device)
                    output = self.attentionNet(img)
                    modelInference.saveModelOutput(output, imgPath, noise, steps)
                    imageCounter += 1
                    if imageCounter % 2 == 0:
                        barVal.numerator = imageCounter
                        print(Fore.CYAN + "Image Processd |", barVal,Fore.CYAN, end='\r')
        print("\n")
Example no. 2
def get_film_reviews(root_url, urls, max_reviews_per_film=None):

    allocine_dic = defaultdict(list)
    bar = ProgressBar(len(urls), max_width=40)

    for i, url in enumerate(urls):
        # Log progress
        bar.numerator = i + 1
        print(bar, end='\r')
        sys.stdout.flush()

        film_id = re.findall(r'\d+', url)[0]
        film_url = "{root}/film/fichefilm-{film_id}/critiques/spectateurs".format(
            root=root_url, film_id=film_id)

        parse_output = parse_film(film_url, max_reviews_per_film)

        if parse_output:
            ratings, reviews, dates, helpfuls = parse_output

            # Rarely happens
            if not (len(ratings) == len(reviews) == len(dates) ==
                    len(helpfuls)):
                print("Error: film-url: " + film_url)
                continue

            allocine_dic['film-url'].extend(len(ratings) * [film_url])
            allocine_dic['rating'].extend(ratings)
            allocine_dic['review'].extend(reviews)
            allocine_dic['date'].extend(dates)
            allocine_dic['helpful'].extend([h[0] for h in helpfuls])
            allocine_dic['unhelpful'].extend([h[1] for h in helpfuls])

    return allocine_dic
Example no. 3
def get_film_urls(root_url, max_page=None):
    list_url = "{root}/films".format(root=root_url)
    r = requests.get(list_url)
    soup = BeautifulSoup(r.text, 'html.parser')

    pagination = soup.find("div", {"class": "pagination-item-holder"})
    pages = pagination.find_all("span")
    page_number = int([page.text for page in pages][-1])

    if max_page:
        if max_page > page_number:
            print("Error: max_page is greater than the actual number of pages")
            return []
        else:
            page_number = max_page

    out_urls = []
    bar = ProgressBar(page_number, max_width=40)

    for page_id in range(1, page_number + 1):
        # Log progress
        bar.numerator = page_id
        print(bar, end='\r')
        sys.stdout.flush()

        # Extend the output list with new URLs
        page_url = "{list_url}/?page={page_num}".format(list_url=list_url,
                                                        page_num=page_id)
        film_urls = parse_list_page(page_url)
        out_urls.extend(film_urls)

    return out_urls
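Taken together, the two scraping helpers above are driven roughly as follows; the base URL is an assumption (the snippets only show the fichefilm/critiques path), and parse_film/parse_list_page come from the surrounding project:

root_url = "http://www.allocine.fr"  # assumed base URL, not named in the source
film_urls = get_film_urls(root_url, max_page=5)
reviews = get_film_reviews(root_url, film_urls, max_reviews_per_film=20)
print(len(reviews['review']), "reviews collected")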
Example no. 4
    def __call__(self):

        bar = ProgressBar(len(self.sourceImages), max_width=int(50))
        counter = 0
        for IC, imgPath in enumerate(self.sourceImages):
            tarImgPath = extractFileName(imgPath)
            img = cv2.imread(imgPath)
            linImg = cv2.imread(self.targetPath + tarImgPath)
            # Trim the right/bottom margins so every patch is full-sized
            imgTemp = img[:img.shape[0] - self.patchSize, :img.shape[1] - self.patchSize]
            for i in range(0, imgTemp.shape[0], self.patchSize):
                for j in range(0, imgTemp.shape[1], self.patchSize):
                    patch = img[i:i + self.patchSize, j:j + self.patchSize, :]
                    LinRGB = linImg[i:i + self.patchSize, j:j + self.patchSize, :]
                    sampledLinRGB = quadBayerSampler(LinRGB)
                    cv2.imwrite(self.pathGTPatch + str(counter) + ".png", patch)
                    cv2.imwrite(self.pathQBPatch + str(counter) + ".png", sampledLinRGB)
                    counter += 1
            if IC % 2 == 0:
                bar.numerator = IC + 1
                print(Fore.CYAN + "Image Processed |", bar, Fore.CYAN, end='\r')

        print("\nPatches extracted:", counter)
Example no. 5
def eval(model, dataloader):

    model.eval()

    running_loss = []
    total = len(dataloader)
    bar = ProgressBar(total, max_width=80)
    logger.info("Starting Evaluation")
    with torch.no_grad():
        for i, data in tqdm(enumerate(dataloader, 0)):
            bar.numerator = i + 1
            print(bar, end='\r')

            inputs = data
            outputs = model(inputs)
            loss = edit_loss(outputs, inputs)

            running_loss.append(loss.item())
            sys.stdout.flush()

    avg_loss = np.mean(running_loss)
    print("Eval Loss: {}".format(avg_loss))

    plt.plot(np.linspace(1, total, total).astype(int), running_loss)
    if not os.path.exists('losses/'):
        os.makedirs('losses/')
    plt.savefig('losses/eval_loss_{}.png'.format(args.lr))
Example no. 6
def compute_edit_distance(edp: EditDistanceParams,
                          bitmap_cls: Type[screen.Bitmap],
                          nominal_colours: Type[colours.NominalColours]):
    """Computes edit distance matrix between all pairs of pixel strings.

    Enumerates all possible values of the masked bit representation from
    bitmap_cls (assuming it is contiguous, i.e. we enumerate all
    2**bitmap_cls.MASKED_BITS values).  These are mapped to the dot
    representation, turned into coloured pixel strings, and we compute the
    edit distance.

    The effect of this is that we precompute the effect of storing all possible
    byte values against all possible screen backgrounds (e.g. as
    influencing/influenced by neighbouring bytes).
    """

    bits = bitmap_cls.MASKED_BITS

    bitrange = np.uint64(2**bits)

    edit = []
    for _ in range(len(bitmap_cls.BYTE_MASKS)):
        edit.append(
            np.zeros(shape=np.uint64(bitrange * bitrange), dtype=np.uint16))

    # Matrix is symmetrical with zero diagonal so only need to compute upper
    # triangle
    bar = ProgressBar((bitrange * (bitrange - 1)) / 2, max_width=80)

    num_dots = bitmap_cls.MASKED_DOTS

    cnt = 0
    for i in range(np.uint64(bitrange)):
        for j in range(i):
            cnt += 1

            if cnt % 10000 == 0:
                bar.numerator = cnt
                print(bar, end='\r')
                sys.stdout.flush()

            pair = (np.uint64(i) << bits) + np.uint64(j)

            for o, ph in enumerate(bitmap_cls.PHASES):
                first_dots = bitmap_cls.to_dots(i, byte_offset=o)
                second_dots = bitmap_cls.to_dots(j, byte_offset=o)

                first_pixels = pixel_string(
                    colours.dots_to_nominal_colour_pixel_values(
                        num_dots, first_dots, nominal_colours, init_phase=ph))
                second_pixels = pixel_string(
                    colours.dots_to_nominal_colour_pixel_values(
                        num_dots, second_dots, nominal_colours, init_phase=ph))
                edit[o][pair] = edit_distance(edp,
                                              first_pixels,
                                              second_pixels,
                                              error=False)

    return edit
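Because only the upper triangle is computed, a caller has to normalise the operand order before forming the packed index; a minimal lookup sketch (the helper name and signature are illustrative, not from the source):

import numpy as np

def lookup_edit_distance(edit, bits, byte_offset, i, j):
    # compute_edit_distance fills only entries with i > j; the matrix is
    # symmetric with a zero diagonal, so swap into the computed triangle.
    if i == j:
        return 0
    if i < j:
        i, j = j, i
    pair = (np.uint64(i) << np.uint64(bits)) + np.uint64(j)
    return edit[byte_offset][pair]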
Example no. 7
def plot_m2_model_details(data, path, window=1000):
    bar = ProgressBar(data['re'].shape[0], max_width=40)
    num_rows = 2
    num_cols = 2
    for i in range(data['re'].shape[0]):
        fig = plt.figure(figsize=(num_cols * 7.00, num_rows * 7.00))
        ax = plt.subplot(num_rows, num_cols, 1)
        ax.set_xlabel('steps')
        ax.set_ylabel('reward')
        ax.grid()

        t = range(data['re'].shape[1])

        mu, sigma = prepare_data(data['re'][i], window)
        plot_curve(ax, mu, sigma, t, 'blue')
        mu, sigma = prepare_data(data['ri'][i], window)
        plot_curve(ax, mu, sigma, t, 'red')
        plt.legend(['external reward', 'internal reward'], loc=4)

        ax = plt.subplot(num_rows, num_cols, 2)
        ax.set_xlabel('steps')
        ax.set_ylabel('weight')
        ax.grid()

        t = range(data['re'].shape[1])

        mu, sigma = prepare_data(data['m2w'][i][:, 0], window)
        plot_curve(ax, mu, sigma, t, 'blue')
        mu, sigma = prepare_data(data['m2w'][i][:, 1], window)
        plot_curve(ax, mu, sigma, t, 'red')
        plt.legend(['curiosity reward', 'familiarity reward'], loc=4)

        ax = plt.subplot(num_rows, num_cols, 3)
        ax.set_xlabel('steps')
        ax.set_ylabel('error')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()

        t = range(data['fme'].shape[1])

        mu, sigma = prepare_data(data['fme'][i], window)
        plot_curve(ax, mu, sigma, t, 'green')
        plt.legend(['prediction error'], loc=1)

        ax = plt.subplot(num_rows, num_cols, 4)
        ax.set_xlabel('reward magnitude')
        ax.set_ylabel('log count')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()
        bins = np.linspace(0, 1, 50)
        ax.hist(data['fme'][i], bins, color='darkcyan')
        plt.legend(['prediction error reward'], loc=1)

        plt.savefig("{0:s}_{1:d}.png".format(path, i))
        plt.close()

        bar.numerator = i + 1
        print(bar)
Example no. 8
    def load_all_data(self, begin_date, end_date):
        con = sqlite3.connect('../data/stock.db')
        code_list = con.execute(
            "SELECT name FROM sqlite_master WHERE type='table'").fetchall()
        X_data_list, Y_data_list, DATA_list = [0] * 10, [0] * 10, [0] * 10
        idx = 0
        split = int(len(code_list) / 9)
        bar = ProgressBar(len(code_list), max_width=80)
        for code in code_list:
            data = self.load_data(code[0], begin_date, end_date)
            data = data.dropna()
            X, Y = self.make_x_y(data, code[0])
            if len(X) <= 1: continue
            code_array = [code[0]] * len(X)
            assert len(X) == len(data.loc[29:len(data) - 6, '일자'])
            if idx % split == 0:
                X_data_list[int(idx / split)] = list(X)
                Y_data_list[int(idx / split)] = list(Y)
                DATA_list[int(idx / split)] = np.array([
                    data.loc[29:len(data) - 6, '일자'].values.tolist(),
                    code_array, data.loc[29:len(data) - 6,
                                         '현재가'], data.loc[34:len(data), '현재가']
                ]).T.tolist()
            else:
                X_data_list[int(idx / split)].extend(X)
                Y_data_list[int(idx / split)].extend(Y)
                DATA_list[int(idx / split)].extend(
                    np.array([
                        data.loc[29:len(data) - 6, '일자'].values.tolist(),
                        code_array, data.loc[29:len(data) - 6, '현재가'],
                        data.loc[34:len(data), '현재가']
                    ]).T.tolist())
            bar.numerator += 1
            print("%s | %d" % (bar, len(X_data_list[int(idx / split)])),
                  end='\r')
            sys.stdout.flush()
            idx += 1
        print("%s" % bar)

        print("Merge splited data")
        bar = ProgressBar(10, max_width=80)
        for i in range(10):
            if isinstance(X_data_list[i], int):
                continue
            if i == 0:
                X_data = X_data_list[i]
                Y_data = Y_data_list[i]
                DATA = DATA_list[i]
            else:
                X_data.extend(X_data_list[i])
                Y_data.extend(Y_data_list[i])
                DATA.extend(DATA_list[i])
            bar.numerator = i + 1
            print("%s | %d" % (bar, len(DATA)), end='\r')
            sys.stdout.flush()
        print("%s | %d" % (bar, len(DATA)))
        return np.array(X_data), np.array(Y_data), np.array(DATA)
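The ten accumulator lists are filled by integer-dividing the running index by split; isolated, the arithmetic is just this (an illustrative sketch with an assumed corpus size):

code_count = 95              # assumed number of stock codes for the demo
split = int(code_count / 9)  # bucket width, giving bucket indices 0..9
buckets = [int(idx / split) for idx in range(code_count)]
assert min(buckets) == 0 and max(buckets) == 9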
Example no. 9
def styleEditor(imRoot, maskRoot, bgRoot, targetRoot, invertedEditor=False):

    # Two output images per combination when inverted blending is enabled
    if invertedEditor:
        invertedEditorCounter = 2
    else:
        invertedEditorCounter = 1

    imList = imageList(imRoot)
    maskList = imageList(maskRoot)
    bgList = imageList(bgRoot)
    counter = 0
    bar = ProgressBar(len(imList) * len(maskList) * len(bgList) *
                      invertedEditorCounter,
                      max_width=10)
    for i in imList:
        for m in maskList:
            for b in bgList:

                mask = cv2.imread(m)
                bg = cv2.imread(b)
                im = cv2.imread(i)

                # Resizing Images
                maskResize = cv2.resize(mask, (im.shape[1], im.shape[0]))
                bgResize = cv2.resize(bg, (im.shape[1], im.shape[0]))

                # Making the inverted mask
                mkInverted = 255 - maskResize

                # Alpha blending
                blendImage = cv2.bitwise_and(im, mkInverted)
                bgBlend = cv2.bitwise_and(bgResize, maskResize)
                finalBlending = cv2.bitwise_or(bgBlend.copy(),
                                               blendImage.copy())
                counter += 1
                # Exporting image
                cv2.imwrite(targetRoot + "styleImage_" + str(counter) + ".png",
                            finalBlending)

                if invertedEditor:
                    # Inverted blending
                    invertedBlending = cv2.bitwise_and(im, bgResize)
                    invertedBlending = cv2.bitwise_and(invertedBlending,
                                                       mkInverted)
                    counter += 1

                    # Exporting image
                    cv2.imwrite(
                        targetRoot + "invertedStyleImage_" + str(counter) +
                        ".png", invertedBlending)

                # Logger
                if counter % 2 == 0:
                    bar.numerator = counter
                    print(Fore.CYAN + "Processed |", bar, Fore.CYAN, end='\r')

    print("\nImage edited {}".format(counter))
Example no. 10
def plot_forward_inverse_model_details(data, path, window=1000):
    bar = ProgressBar(data['re'].shape[0], max_width=40)
    num_rows = 3
    num_cols = 2
    for i in range(data['re'].shape[0]):
        fig = plt.figure(figsize=(num_cols * 7.00, num_rows * 7.00))
        ax = plt.subplot(num_rows, num_cols, 1)
        ax.set_xlabel('steps')
        ax.set_ylabel('reward')
        ax.grid()

        t = range(data['re'].shape[1])

        mu, sigma = prepare_data(data['re'][i], window)
        plot_curve(ax, mu, sigma, t, 'blue')
        mu, sigma = prepare_data(data['ri'][i], window)
        plot_curve(ax, mu, sigma, t, 'red')
        plt.legend(['external reward', 'internal reward'], loc=4)

        ax = plt.subplot(num_rows, num_cols, 3)
        ax.set_xlabel('steps')
        ax.set_ylabel('error')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()

        t = range(data['fme'].shape[1])
        mu, sigma = prepare_data(data['fme'][i], window)
        plot_curve(ax, mu, sigma, t, 'green')

        t = range(data['ime'].shape[1])
        mu, sigma = prepare_data(data['ime'][i], window)
        plot_curve(ax, mu, sigma, t, 'orchid')
        plt.legend(['prediction model error', 'inverse model error'], loc=1)

        ax = plt.subplot(num_rows, num_cols, 5)
        ax.set_xlabel('reward magnitude')
        ax.set_ylabel('log count')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()
        bins = np.linspace(0, 1, 50)
        ax.hist(data['fme'][i], bins, color='darkcyan')
        plt.legend(['prediction error reward'], loc=1)

        if 'sdm' in data.keys() and 'ldm' in data.keys():
            ax = plt.subplot(num_rows, num_cols, 2)
            c = ax.pcolormesh(data['sdm'][i], cmap='Reds')
            fig.colorbar(c, ax=ax)

            ax = plt.subplot(num_rows, num_cols, 4)
            c = ax.pcolormesh(data['ldm'][i], cmap='Blues')
            fig.colorbar(c, ax=ax)

        plt.savefig("{0:s}_{1:d}.png".format(path, i))
        plt.close()

        bar.numerator = i + 1
        print(bar)
Example no. 11
    def fit(self, dataset, n_epoch=100, epoch_size=5000):

        init_op = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        test_x, test_y, test_seq_len = dataset.get_test_data()
        acc = 0
        with tf.Session(config=config) as sess:
            #shutil.rmtree('/tmp/TF/MNIST')
            self.sw_train = tf.summary.FileWriter('/tmp/TF/MNIST/train',
                                                  sess.graph)
            self.sw_test = tf.summary.FileWriter('/tmp/TF/MNIST/test')
            sess.run(init_op)
            for i in range(n_epoch):
                print('Epoch %d/%d' % (i + 1, n_epoch))
                bar = ProgressBar(int(epoch_size / batch_size), max_width=80)
                for j in range(int(epoch_size / batch_size)):
                    batch_x, batch_y, batch_seq_len = dataset.next_batch(
                        batch_size)
                    assert batch_x.shape[0] == batch_size and batch_y.shape[
                        0] == batch_size and batch_seq_len.shape[
                            0] == batch_size
                    summary, _, cost = sess.run(
                        [self.merged, self.train_op, self.cost],
                        feed_dict={
                            self.X: batch_x,
                            self.Y: batch_y,
                            self.Seq_len: batch_seq_len,
                            self.batch_size: batch_size
                        })
                    self.sw_train.add_summary(
                        summary,
                        i * int(epoch_size / batch_size) + j)
                    bar.numerator = j + 1
                    print("%s | loss: %f | test_acc: %.2f" %
                          (bar, cost, acc * 100),
                          end='\r')
                    sys.stdout.flush()
                    if j % 100 == 0:
                        summary, cost, acc = sess.run(
                            [self.merged, self.cost, self.accuracy],
                            feed_dict={
                                self.X: test_x,
                                self.Y: test_y,
                                self.Seq_len: test_seq_len,
                                self.batch_size: len(test_x)
                            })
                        self.sw_test.add_summary(
                            summary,
                            i * int(epoch_size / batch_size) + j)
                print()
            if not os.path.exists(self.model_dir):
                os.makedirs(self.model_dir)
            saver = tf.train.Saver()
            save_path = saver.save(sess, '%s/model.ckpt' % self.model_dir)
            print("Model saved in file: %s" % save_path)
Example no. 12
def progress_bar():
    denominator = 5 if OPTIONS['--fast'] else 100
    bar = ProgressBar(0 if OPTIONS['--undefined'] else denominator)
    for i in range(denominator + 1):
        bar.numerator = i
        print(bar, end='\r')
        sys.stdout.flush()
        time.sleep(0.25)
    bar.force_done = True  # Needed in case of --undefined.
    print(bar)  # Always print one last time.
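The same pattern stripped of the docopt OPTIONS dependency, as a self-contained sketch (import path per the etaprogress package):

import sys
import time
from etaprogress.progress import ProgressBar

def demo_progress(total=100, undefined=False):
    bar = ProgressBar(0 if undefined else total)
    for i in range(total + 1):
        bar.numerator = i
        print(bar, end='\r')
        sys.stdout.flush()
        time.sleep(0.05)
    bar.force_done = True  # marks an undefined-total bar as finished
    print(bar)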
Example no. 14
    def load_all_data(self, begin_date, end_date):
        #con = sqlite3.connect('../data/stock.db')
        #code_list = con.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
        code_list = glob.glob('../data/hdf/*.hdf')
        code_list = list(map(lambda x: x.split('.hdf')[0][-6:], code_list))
        X_data_list, Y_data_list, DATA_list = [0]*10, [0]*10, [0]*10
        idx = 0
        split = int(len(code_list) / 9)
        bar = ProgressBar(len(code_list), max_width=80)
        for code in code_list:
            data = self.load_data(code, begin_date, end_date)
            if data is None or len(data) == 0:
                continue
            data = data.dropna()
            len_data = len(data)
            X, Y = self.make_x_y(data, code)
            if len(X) <= 10: continue
            # Average turnover over the last 10 days: mean price * mean volume
            mean_velocity = int(data.loc[len_data-10:len_data, '현재가'].mean()) * int(data.loc[len_data-10:len_data, '거래량'].mean())
            if mean_velocity < 1000000000:  # skip if turnover is under 1 billion
                continue
            code_array = [code] * len(X)
            assert len(X) == len(data.loc[29:len(data)-self.predict_dist-1, '일자'])
            if idx%split == 0:
                X_data_list[int(idx/split)] = list(X)
                Y_data_list[int(idx/split)] = list(Y)
                DATA_list[int(idx/split)] = np.array([data.loc[29:len(data)-6, '일자'].values.tolist(), code_array, data.loc[29:len(data)-6, '현재가'], data.loc[34:len(data), '현재가'], data.loc[30:len(data)-5, '시가']]).T.tolist()
            else:
                X_data_list[int(idx/split)].extend(X)
                Y_data_list[int(idx/split)].extend(Y)
                DATA_list[int(idx/split)].extend(np.array([data.loc[29:len(data)-6, '일자'].values.tolist(), code_array, data.loc[29:len(data)-6, '현재가'], data.loc[34:len(data), '현재가'], data.loc[30:len(data)-5, '시가']]).T.tolist())
            bar.numerator += 1
            print("%s | %d" % (bar, len(X_data_list[int(idx/split)])), end='\r')
            sys.stdout.flush()
            idx += 1
        print("%s" % bar)

        print("Merge splited data")
        bar = ProgressBar(10, max_width=80)
        for i in range(10):
            if isinstance(X_data_list[i], int):
                continue
            if i == 0:
                X_data = X_data_list[i]
                Y_data = Y_data_list[i]
                DATA = DATA_list[i]
            else:
                X_data.extend(X_data_list[i])
                Y_data.extend(Y_data_list[i])
                DATA.extend(DATA_list[i])
            bar.numerator = i+1
            print("%s | %d" % (bar, len(DATA)), end='\r')
            sys.stdout.flush()
        print("%s | %d" % (bar, len(DATA)))
        return np.array(X_data), np.array(Y_data), np.array(DATA)
Example no. 15
def plot_vae_forward_model_details(data, path, window=1000):
    num_subplots = 4
    bar = ProgressBar(data['re'].shape[0], max_width=40)
    for i in range(data['re'].shape[0]):
        plt.figure(figsize=(8.00, 4 * 5.12))
        ax = plt.subplot(num_subplots, 1, 1)
        ax.set_xlabel('steps')
        ax.set_ylabel('reward')
        ax.grid()

        t = range(data['re'].shape[1])

        mu, sigma = prepare_data(data['re'][i], window)
        plot_curve(ax, mu, sigma, t, 'blue')
        mu, sigma = prepare_data(data['ri'][i], window)
        plot_curve(ax, mu, sigma, t, 'red')
        plt.legend(['external reward', 'internal reward'], loc=4)

        ax = plt.subplot(num_subplots, 1, 2)
        ax.set_xlabel('steps')
        ax.set_ylabel('error')
        ax.grid()

        t = range(data['fme'].shape[1])

        mu, sigma = prepare_data(data['fme'][i], window)
        plot_curve(ax, mu, sigma, t, 'green')
        plt.legend(['prediction error'], loc=1)

        ax = plt.subplot(num_subplots, 1, 3)
        ax.set_xlabel('steps')
        ax.set_ylabel('loss value')
        ax.grid()

        t = range(data['vl'].shape[1])

        mu, sigma = prepare_data(data['vl'][i], window)
        plot_curve(ax, mu, sigma, t, 'orchid')
        plt.legend(['VAE loss'], loc=1)

        ax = plt.subplot(num_subplots, 1, 4)
        ax.set_xlabel('reward magnitude')
        ax.set_ylabel('log count')
        ax.set_yscale('log', nonpositive='clip')
        ax.grid()
        bins = np.linspace(0, 1, 50)
        ax.hist(data['fme'][i], bins, color='darkcyan')
        plt.legend(['prediction error reward'], loc=1)

        plt.savefig("{0:s}_{1:d}.png".format(path, i))
        plt.close()

        bar.numerator = i + 1
        print(bar)
Example no. 16
    def fit(self, X_data, Y_data):
        # Add an op to initialize the variables.
        init_op = tf.global_variables_initializer()
        batch_size = 64
        time_length = 30

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        lr = 0.0005
        loss_sum = 0
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if os.path.exists('%s/model.ckpt.meta' % self.prev_model):
                ckpt = tf.train.get_checkpoint_state(self.prev_model)
                saver = tf.train.Saver()
                saver.restore(sess, ckpt.model_checkpoint_path)
            for i in range(self.num_epoch):
                lr *= 0.9
                print("\nEpoch %d/%d is started" % (i + 1, self.num_epoch),
                      end='\n')
                bar = ProgressBar(len(X_data) // batch_size, max_width=80)
                for j in range(int(len(X_data) / batch_size) - 1):
                    X_batch = X_data[batch_size * j:batch_size *
                                     (j + 1)].reshape(batch_size, time_length,
                                                      23)
                    Y_batch = Y_data[batch_size * j:batch_size * (j + 1)]
                    _ = sess.run(self.updateModel,
                                 feed_dict={
                                     self.lr: lr,
                                     self.inData: X_batch,
                                     self.target: Y_batch,
                                     self.batch_size: batch_size,
                                     self.time_length: time_length
                                 })

                    if j % 10 == 0:
                        loss = sess.run(self.loss,
                                        feed_dict={
                                            self.lr: lr,
                                            self.inData: X_batch,
                                            self.target: Y_batch,
                                            self.batch_size: batch_size,
                                            self.time_length: time_length
                                        })
                        bar.numerator = j + 1
                        loss_sum = ((j / 10) * loss_sum + loss) / (j / 10 + 1)
                        print("%s | loss: %f" % (bar, loss_sum), end='\r')
                        sys.stdout.flush()

            if not os.path.exists(self.model_dir):
                os.makedirs(self.model_dir)
            saver = tf.train.Saver()
            save_path = saver.save(sess, '%s/model.ckpt' % self.model_dir)
            print("Model saved in file: %s" % save_path)
Example no. 17
    def load_all_data(self, begin_date, end_date):
        code_list = glob.glob('../data/hdf/*.hdf')
        code_list = list(map(lambda x: x.split('.hdf')[0][-6:], code_list))
        X_data_list, Y_data_list, DATA_list = [0]*10, [0]*10, [0]*10
        idx = 0
        split = int(len(code_list) / 9)
        bar = ProgressBar(len(code_list), max_width=80)
        for code in code_list:
            data = self.load_data(code, begin_date, end_date)
            if data is None or len(data) == 0:
                continue
            data = data.dropna()
            len_data = len(data)
            X, Y = self.make_x_y(data, code)
            if len(X) <= 10: continue
            # Average turnover over the last 10 days: mean price * mean volume
            mean_velocity = int(data.loc[len_data-10:len_data, '현재가'].mean()) * int(data.loc[len_data-10:len_data, '거래량'].mean())
            if mean_velocity < 1000000000:  # skip if turnover is under 1 billion
                continue
            code_array = [code] * len(X)
            if len(X) != len(data.loc[self.frame_len-1:len(data)-self.predict_dist-1, '일자']):
                print("lenX:%d, lenData:%d"%(len(X), len(data.loc[self.frame_len-1:len(data)-self.predict_dist-1, '일자'])))
            if idx%split == 0:
                X_data_list[int(idx/split)] = list(X)
                Y_data_list[int(idx/split)] = list(Y)
                DATA_list[int(idx/split)] = np.array([data.loc[self.frame_len-1:len(data)-(self.predict_dist+1), '일자'].values.tolist(), code_array, data.loc[self.frame_len-1:len(data)-(self.predict_dist+1), '현재가'], data.loc[self.frame_len+self.predict_dist-1:len(data), '현재가'], data.loc[self.frame_len:len(data)-self.predict_dist, '시가']]).T.tolist()
            else:
                X_data_list[int(idx/split)].extend(X)
                Y_data_list[int(idx/split)].extend(Y)
                DATA_list[int(idx/split)].extend(np.array([data.loc[self.frame_len-1:len(data)-(self.predict_dist+1), '일자'].values.tolist(), code_array, data.loc[self.frame_len-1:len(data)-(self.predict_dist+1), '현재가'], data.loc[self.frame_len+self.predict_dist-1:len(data), '현재가'], data.loc[self.frame_len:len(data)-self.predict_dist, '시가']]).T.tolist())
            bar.numerator += 1
            print("%s | %d" % (bar, len(X_data_list[int(idx/split)])), end='\r')
            sys.stdout.flush()
            idx += 1
        print("%s" % bar)

        print("Merge splited data")
        bar = ProgressBar(10, max_width=80)
        for i in range(10):
            if isinstance(X_data_list[i], int):
                continue
            if i == 0:
                X_data = X_data_list[i]
                Y_data = Y_data_list[i]
                DATA = DATA_list[i]
            else:
                X_data.extend(X_data_list[i])
                Y_data.extend(Y_data_list[i])
                DATA.extend(DATA_list[i])
            bar.numerator = i+1
            print("%s | %d" % (bar, len(DATA)), end='\r')
            sys.stdout.flush()
        print("%s | %d" % (bar, len(DATA)))
        return np.array(X_data), np.array(Y_data), np.array(DATA)
Example no. 18
def test_defined_hour():
    progress_bar = ProgressBar(2000)

    assert '  0% (    0/2,000) [       ] eta --:-- /' == str(progress_bar)

    eta._NOW = lambda: 1411868722.0
    progress_bar.numerator = 1
    assert '  0% (    1/2,000) [       ] eta --:-- -' == str(progress_bar)

    eta._NOW = lambda: 1411868724.0
    progress_bar.numerator = 2
    assert '  0% (    2/2,000) [     ] eta 1:06:36 \\' == str(progress_bar)
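The 1:06:36 figure is consistent with a simple linear-rate estimate: the stubbed clock advances two seconds while the numerator moves from 1 to 2, leaving 1,998 units at two seconds each.

remaining = 2000 - 2                            # units left at the last sample
rate = (2 - 1) / (1411868724.0 - 1411868722.0)  # one unit per two seconds
assert remaining / rate == 3996.0               # 3,996 s == 1:06:36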
Example no. 19
    def modelInference(self, testImagesPath=None, outputDir=None, resize=None, validation=None, noiseSet=None, steps=None):
        if not validation:
            self.modelLoad(cpu=False)
            print("\nInferencing on pretrained weights.")
        else:
            print("Validation about to begin.")
        if not noiseSet:
            noiseSet = self.noiseSet
        if testImagesPath:
            self.testImagesPath = testImagesPath
        if outputDir:
            self.resultDir = outputDir
        

        modelInference = inference(inputRootDir=self.testImagesPath, outputRootDir=self.resultDir, modelName=self.modelName, validation=validation)

        testImageList = modelInference.testingSetProcessor()
        #print(testImageList, self.testImagesPath)
        barVal = ProgressBar(len(testImageList) // 3, max_width=50)
        imageCounter = 0
        PSNRval = []
        SSIMVal = []
        c = 0
        from datetime import datetime
        with torch.no_grad():
            for imgPath in testImageList:
                torch.cuda.empty_cache()
                if "_medium" in imgPath:
                    #if int(extractFileName(imgPath, True).split("_")[0]) % 3 ==0:
                    #print(extractFileName(imgPath, True).split("_")[0])
                    c += 1
                    device = self.device
                    imgLDR, lumLDR = modelInference.inputForInference(imgPath, noiseLevel=0)#.to(self.device)
                    #print(imgL.shape, imgR.shape, imgPath)
                    a = datetime.now()
                    output = self.attentionNet(imgLDR.to(device))#.to(device)
                    
                    
                    torch.cuda.empty_cache()
                    output = self.HDRRec(output.detach())
                    b = datetime.now()
                    d = b - a
                    #print( d)
                    torch.cuda.empty_cache()
                    modelInference.saveModelOutput(output, imgPath, steps)
                     
                    imageCounter += 1
                    if imageCounter % 2 == 0:
                        barVal.numerator = imageCounter
                        print(Fore.CYAN + "Image Processd |", barVal,Fore.CYAN, end='\r')
            print(c)
Example no. 20
def report_progress(value, title='', init=False):
    global bar, start_time
    if init:
        start_time = time.time()
        print(datetime.now().strftime('>>> Start @ %Y-%m-%d %H:%M:%S'))
        bar = ProgressBar(value, max_width=100)
    else:
        bar.numerator = value
    elapsed_time = time.strftime("%H:%M:%S",
                                 time.gmtime(time.time() - start_time))

    sys.stdout.write('\r>>> {0} = {1} {2}'.format(elapsed_time, bar, title))
    sys.stdout.flush()
    return
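A possible driver for report_progress: the first call sizes the module-level bar, later calls advance it (work() and the item count are hypothetical, and time is already imported by the module above):

def work(n):  # stand-in for the real per-item task
    time.sleep(0.01)

total = 50
report_progress(total, init=True)
for n in range(1, total + 1):
    work(n)
    report_progress(n, title='item %d' % n)
print()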
Example no. 21
def test_defined():
    progress_bar = ProgressBar(2000)

    assert '  0% (    0/2,000) [       ] eta --:-- /' == str(progress_bar)
    assert '  0% (    0/2,000) [       ] eta --:-- -' == str(progress_bar)
    assert '  0% (    0/2,000) [       ] eta --:-- \\' == str(progress_bar)

    eta._NOW = lambda: 1411868722.0
    progress_bar.numerator = 102
    assert '  5% (  102/2,000) [       ] eta --:-- |' == str(progress_bar)
    assert '  5% (  102/2,000) [       ] eta --:-- /' == str(progress_bar)

    eta._NOW = lambda: 1411868722.5
    progress_bar.numerator = 281
    assert ' 14% (  281/2,000) [       ] eta 00:05 -' == str(progress_bar)

    eta._NOW = lambda: 1411868723.0
    progress_bar.numerator = 593
    assert ' 29% (  593/2,000) [##     ] eta 00:03 \\' == str(progress_bar)

    eta._NOW = lambda: 1411868723.5
    progress_bar.numerator = 1925
    assert ' 96% (1,925/2,000) [###### ] eta 00:01 |' == str(progress_bar)

    eta._NOW = lambda: 1411868724.0
    progress_bar.numerator = 1999
    assert ' 99% (1,999/2,000) [###### ] eta 00:01 /' == str(progress_bar)

    eta._NOW = lambda: 1411868724.5
    progress_bar.numerator = 2000
    assert '100% (2,000/2,000) [#######] eta 00:00 -' == str(progress_bar)
    assert '100% (2,000/2,000) [#######] eta 00:00 \\' == str(progress_bar)
    assert '100% (2,000/2,000) [#######] eta 00:00 |' == str(progress_bar)
Example no. 22
    def load_features(features_fns):
        """
        Load object features from an HDF5 file.
        """
        for features_fn in features_fns:
            print("Loading {}...".format(features_fn))
            with h5py.File(features_fn, "r", libver="latest"
                           ) as f_features, database.engine.begin() as conn:
                object_ids = f_features["object_id"]
                vectors = f_features["features"]

                stmt = (models.objects.update().where(
                    models.objects.c.object_id == bindparam(
                        "_object_id")).values({"vector": bindparam("vector")}))

                bar = ProgressBar(len(object_ids), max_width=40)
                obj_iter = iter(zip(object_ids, vectors))
                while True:
                    chunk = tuple(itertools.islice(obj_iter, 1000))
                    if not chunk:
                        break
                    conn.execute(
                        stmt,
                        [{
                            "_object_id": str(object_id),
                            "vector": vector
                        } for (object_id, vector) in chunk],
                    )

                    bar.numerator += len(chunk)
                    print(bar, end="\r")
                print()
                print("Done.")
Example no. 23
    def load_all_data(self, begin_date, end_date):
        con = sqlite3.connect('../data/stock.db')
        code_list = con.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
        X_data_list, Y_data_list, DATA_list = [0]*10, [0]*10, [0]*10
        idx = 0
        split = int(len(code_list) / 9)
        bar = ProgressBar(len(code_list), max_width=80)
        for code in code_list:
            data = self.load_data(code[0], begin_date, end_date)
            data = data.dropna()
            X, Y = self.make_x_y(data, code[0])
            if len(X) <= 1: continue
            code_array = [code[0]] * len(X)
            assert len(X) == len(data.loc[29:len(data)-6, '일자'])
            if idx%split == 0:
                X_data_list[int(idx/split)] = list(X)
                Y_data_list[int(idx/split)] = list(Y)
                DATA_list[int(idx/split)] = np.array([data.loc[29:len(data)-6, '일자'].values.tolist(), code_array, data.loc[29:len(data)-6, '현재가'], data.loc[34:len(data), '현재가']]).T.tolist()
            else:
                X_data_list[int(idx/split)].extend(X)
                Y_data_list[int(idx/split)].extend(Y)
                DATA_list[int(idx/split)].extend(np.array([data.loc[29:len(data)-6, '일자'].values.tolist(), code_array, data.loc[29:len(data)-6, '현재가'], data.loc[34:len(data), '현재가']]).T.tolist())
            bar.numerator += 1
            print("%s | %d" % (bar, len(X_data_list[int(idx/split)])), end='\r')
            sys.stdout.flush()
            idx += 1
        print("%s" % bar)

        print("Merge splited data")
        bar = ProgressBar(10, max_width=80)
        for i in range(10):
            if isinstance(X_data_list[i], int):
                continue
            if i == 0:
                X_data = X_data_list[i]
                Y_data = Y_data_list[i]
                DATA = DATA_list[i]
            else:
                X_data.extend(X_data_list[i])
                Y_data.extend(Y_data_list[i])
                DATA.extend(DATA_list[i])
            bar.numerator = i+1
            print("%s | %d" % (bar, len(DATA)), end='\r')
            sys.stdout.flush()
        print("%s | %d" % (bar, len(DATA)))
        return np.array(X_data), np.array(Y_data), np.array(DATA)
Example no. 24
def main():
    parser = ArgumentParser()
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--delete',
                        action='store_true',
                        help="Delete old mails, no ask.")
    args = parser.parse_args()

    cfg = load_config()
    cfg.debug = args.debug

    popfunc = POP3_SSL if cfg.pop3.ssl else POP3
    pop = popfunc(cfg.pop3.host)
    pop.user(cfg.pop3.user)
    pop.pass_(cfg.pop3.passwd)

    num = len(pop.list()[1])
    n_delete = 0

    bar = ProgressBar(num)

    for i in range(1, num + 1):
        bar.numerator = i - 1
        if not cfg.debug:
            print(bar, end='\r')
        mail = pop.retr(i)[1]
        if to_delete(mail, cfg):
            n_delete += 1
            if cfg.debug:
                print("Mark {} to be delete".format(i))
            pop.dele(i)
            if n_delete == MAX_DELETE:
                break

    if not args.delete:
        answer = input("Okay to delete {} mails? (y/N) ".format(n_delete))
        if answer != 'y':
            pop.rset()

    pop.quit()

    if n_delete == MAX_DELETE:
        print(
            "There may be more mails to delete.  You may want to re-run this script."
        )
Example no. 25
def main():

    session = vk.Session(TOKEN)
    api = vk.API(session)

    user_ids_names = get_dialogs(api)

    selected_number = show_prompt(user_ids_names)
    user_id = user_ids_names[selected_number]["id"]
    dirname = mkdir(user_ids_names[selected_number]["name"])
    msgs = get_msgs(api, user_id)
    urls = get_urls(msgs)

    bar = ProgressBar(0, max_width=60)
    bar.numerator = 0

    with ThreadPoolExecutor(max_workers=5) as executor:
        executor.map(lambda url: download_pics(url, dirname, bar), urls)
Example no. 26
 def write_results(self, entries_to_process):
     '''Write results into CSV file'''
     counter = 1
     # Create progress bar
     bar = ProgressBar(entries_to_process, max_width=72)
     # Write CSV header
     csv_writer = self.open_csv_file()
     # Iterate over each feed entry from the hpHosts feed
     for feed_entry in self.hphosts_feed.entries:
         # Stop processing once the '-n' entry limit is reached
         if counter > entries_to_process:
             break
         result = {}
         # Update progress bar
         bar.numerator = counter
         print(bar, end='\r')
         # Write phishing site details into CSV
         result['Phishing Site Domain'] = feed_entry.title
         result['Added to hpHosts'] = parse(feed_entry.published)
         result['Phishing Site IP Address'] = re.findall(
             r'[0-9]+(?:\.[0-9]+){3}', feed_entry.summary)[0]
         # Iterate through the third-party DNS services
         for resolver_name in self.resolver_names:
             try:
                 dns_resolvers = self.resolvers[resolver_name]['resolvers']
                 phishing_domain = result['Phishing Site Domain']
                 resolver = DnsResolver(dns_resolvers)
                 # Retrieve the IP addresses that the third-party DNS service resolves
                 ip_addresses = resolver.get_ip_address(phishing_domain)
             except Exception as e:
                 # Write DNS lookup error message in the CSV file
                 result[resolver_name] = e
             else:
                 blockpages = self.resolvers[resolver_name]['blockpages']
                 result[resolver_name] = self.generate_result(
                     ip_addresses, blockpages, resolver_name)
         # Write results into file
         csv_writer.writerow(result)
         # Flush file after writing each line
         self.output_file_handler.flush()
         counter += 1
     # Close output file
     self.output_file_handler.close()
     return counter
Example no. 27
    def __call__(self):

        bar = ProgressBar(len(self.sourceImages), max_width=int(50))
        counter = 0
        for IC, imgPath in enumerate(self.sourceImages):
            img = cv2.imread(imgPath)
            # Trim the right/bottom margins so every patch is full-sized
            imgTemp = img[:img.shape[0] - self.patchSize, :img.shape[1] -
                          self.patchSize]
            for i in range(0, imgTemp.shape[0], self.patchSize):
                for j in range(0, imgTemp.shape[1], self.patchSize):
                    patch = img[i:i + self.patchSize, j:j + self.patchSize, :]
                    cv2.imwrite(self.targetPath + str(counter) + ".png", patch)
                    counter += 1
            if IC % 2 == 0:
                bar.numerator = IC + 1
                print(Fore.CYAN + "Image Processed |", bar, Fore.CYAN, end='\r')

        print("\n Patch Extracted:", counter)
Example no. 28
def main():
    argc = len(sys.argv) - 1
    if argc == 0:  # default range
        doHelp()
        low, high = 32, 255
    elif argc == 2:  # user-defined range
        low = int(sys.argv[1])
        high = int(sys.argv[2])
    else:
        doHelp()
        sys.exit(0)

    # Clamp to the valid Unicode code-point range
    if low < 0: low = 1
    if high < 0: high = 1
    if low >= 0x110000: low = 0x110000 - 1
    if high >= 0x110000: high = 0x110000 - 1

    work = high - low
    work_milestone = math.ceil(work / 5) or 1  # round up; at least 1
    bar = ProgressBar(work, max_width=50)

    i = 0
    s = ""

    for i in range(work + 1):
        k = i + low
        s += chr(k)

        if k % work_milestone == 0:
            bar.numerator = i
            print(bar)
            sys.stdout.flush()

    bar.numerator = i
    print(bar)
    print("")

    pyperclip.copy(s)
    print("done")
Example no. 29
def main():
    parser = ArgumentParser()
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--delete', action='store_true',
                        help="Delete old mails, no ask.")
    args = parser.parse_args()

    cfg = load_config()
    cfg.debug = args.debug

    popfunc = POP3_SSL if cfg.pop3.ssl else POP3
    pop = popfunc(cfg.pop3.host)
    pop.user(cfg.pop3.user)
    pop.pass_(cfg.pop3.passwd)

    num = len(pop.list()[1])
    n_delete = 0

    bar = ProgressBar(num)

    for i in range(1, num+1):
        bar.numerator = i - 1
        if not cfg.debug:
            print(bar, end='\r')
        mail = pop.retr(i)[1]
        if to_delete(mail, cfg):
            n_delete += 1
            if cfg.debug:
                print("Mark {} to be delete".format(i))
            pop.dele(i)
            if n_delete == MAX_DELETE:
                break

    if not args.delete:
        answer = input("Okay to delete {} mails? (y/N) ".format(n_delete))
        if answer != 'y':
            pop.rset()

    pop.quit()

    if n_delete == MAX_DELETE:
        print("There may be more mails to delete.  You may want to re-run this script.")
Example no. 30
def main():
    Windows.enable()  # Does nothing if not on Windows.
    # Prepare.
    if os.name == 'nt':
        locale.setlocale(locale.LC_ALL, 'english-us')
    else:
        locale.resetlocale()
    progress_bar = ProgressBar(5 if OPTIONS['--fast'] else 100)
    progress_bar.bar.CHAR_FULL = Color('{autoyellow}#{/autoyellow}')
    progress_bar.bar.CHAR_LEADING = Color('{autoyellow}#{/autoyellow}')
    progress_bar.bar.CHAR_LEFT_BORDER = Color('{autoblue}[{/autoblue}')
    progress_bar.bar.CHAR_RIGHT_BORDER = Color('{autoblue}]{/autoblue}')

    # Run.
    for i in range(6 if OPTIONS['--fast'] else 101):
        progress_bar.numerator = i
        print(progress_bar, end='\r')
        sys.stdout.flush()
        time.sleep(0.25)
    print(progress_bar)  # Always print one last time.
Example no. 31
    def load_data_in_account(self):
        # load code list from account
        DATA = []
        with open('../data/stocks_in_account.txt',
                  encoding='utf-8') as f_stocks:
            for line in f_stocks.readlines():
                data = line.split(',')
                DATA.append([data[6].replace('A', ''), data[1], data[0]])

        # load data in DATA
        #con = sqlite3.connect('../data/stock.db')
        X_test = []
        idx_rm = []
        first = True
        bar = ProgressBar(len(DATA), max_width=80)
        for idx, code in enumerate(DATA):
            bar.numerator += 1
            print("%s | %d" % (bar, len(X_test)), end='\r')
            sys.stdout.flush()

            try:
                #df = pd.read_sql("SELECT * from '%s'" % code[0], con, index_col='일자').sort_index()
                df = pd.read_hdf('../data/hdf/%s.hdf' % code[0],
                                 'day').sort_index()
            except pd.io.sql.DatabaseError as e:
                print(e)
                idx_rm.append(idx)
                continue
            data = df.iloc[-30:, :]
            data = data.reset_index()
            for col in data.columns:
                try:
                    data.loc[:, col] = data.loc[:, col].str.replace('--', '-')
                    data.loc[:, col] = data.loc[:, col].str.replace('+', '')
                except AttributeError as e:
                    print(e)
            data.loc[:, 'month'] = data.loc[:, '일자'].str[4:6]
            DATA[idx].append(int(data.loc[len(data) - 1, '현재가']))
            data = data.drop(['일자', '체결강도'], axis=1)
            if len(data) < 30:
                idx_rm.append(idx)
                continue
            try:
                data = self.scaler[code[0]].transform(np.array(data))
            except KeyError:
                idx_rm.append(idx)
                continue
            X_test.extend(np.array(data))
        for i in reversed(idx_rm):  # delete in reverse so earlier indices stay valid
            del DATA[i]
        X_test = np.array(X_test).reshape(-1, 23 * 30)
        print()
        return X_test, DATA
Example no. 32
 def load_current_data(self):
     code_list = glob.glob('../data/hdf/*.hdf')
     code_list = list(map(lambda x: x.split('.hdf')[0][-6:], code_list))
     X_test = []
     DATA = []
     first = True
     bar = ProgressBar(len(code_list), max_width=80)
     #for code in code_list:
     code_list_ret = []
     for i, code in enumerate(code_list):
         bar.numerator = i + 1
         print("%s | %d" % (bar, len(X_test)), end='\r')
         sys.stdout.flush()
         df = pd.read_hdf('../data/hdf/%s.hdf' % code, 'day').sort_index()
         data = df.iloc[-self.frame_len:, :]
         if pd.to_numeric(data.loc[:, '현재가']).mean() * pd.to_numeric(
                 data.loc[:, '거래량']).mean() < 1000000000:
             continue
         data = data.reset_index()
         for col in data.columns:
             try:
                 data.loc[:, col] = data.loc[:, col].str.replace('--', '-')
                 data.loc[:, col] = data.loc[:, col].str.replace('+', '')
             except AttributeError as e:
                 pass
         data.loc[:, 'month'] = data.loc[:, '일자'] % 10000 / 100
         data = data.drop(['일자', '체결강도'], axis=1)
         if len(data) < self.frame_len:
             continue
         try:
             data_t = self.scaler[code].transform(np.array(data))
         except (KeyError, ValueError):
             continue
         DATA.append(int(data.loc[len(data) - 1, '현재가']))
         code_list_ret.append(code)
         X_test.extend(np.array(data_t))
     X_test = np.array(X_test).reshape(-1, 23 * self.frame_len)
     print()
     assert len(X_test) == len(code_list_ret)
     assert len(X_test) == len(DATA)
     return X_test, code_list_ret, DATA
Example no. 33
    def test_edit_distances_hgr(self):
        """Assert invariants and symmetries of the edit distance matrices."""

        for p in PALETTES:
            ed = screen.HGRBitmap.edit_distances(p)
            print(p)

            bar = ProgressBar((4 * 2**14 * (2**14 - 1)) / 2, max_width=80)

            cnt = 0
            for ph in range(2):

                # TODO: for HGR this invariant isn't true, all-0 and all-1
                #  values for header/footer/body with/without palette bit can
                #  also have zero difference
                # # Only zero entries should be on diagonal, i.e. of form
                # # i << 14 + i
                # zeros = np.arange(len(ed[ph]))[ed[ph] == 0]
                # for z in zeros:
                #     z1 = z & (2**14-1)
                #     z2 = (z >> 14) & (2**14-1)
                #     if z1 != z2:
                #         self.assertEqual(z1, z2)

                # Assert that matrix is symmetrical
                for i in range(2**14):
                    for j in range(i):
                        cnt += 1

                        if cnt % 10000 == 0:
                            bar.numerator = cnt
                            print(bar, end='\r')
                            sys.stdout.flush()

                        self.assertEqual(
                            ed[ph][(i << 14) + j],
                            ed[ph][(j << 14) + i],
                        )

                        # Matrix is positive definite
                        self.assertGreaterEqual(ed[ph][(i << 14) + j], 0)
Example no. 34
def test_undefined():
    misc.terminal_width = lambda: 40
    progress_bar = ProgressBar(None, max_width=30)

    assert '0 [?             ] eta --:-- /' == str(progress_bar)
    assert '0 [ ?            ] eta --:-- -' == str(progress_bar)
    assert '0 [  ?           ] eta --:-- \\' == str(progress_bar)

    eta._NOW = lambda: 1411868722.0
    progress_bar.numerator = 10
    assert '10 [   ?         ] eta --:-- |' == str(progress_bar)
    assert '10 [    ?        ] eta --:-- /' == str(progress_bar)

    eta._NOW = lambda: 1411868722.5
    progress_bar.numerator = 100
    assert '100 [     ?      ] eta --:-- -' == str(progress_bar)

    eta._NOW = lambda: 1411868723.0
    progress_bar.numerator = 1954727
    assert '1,954,727 [    ? ] eta --:-- \\' == str(progress_bar)
    assert '1,954,727 [   ?  ] eta --:-- |' == str(progress_bar)
Example no. 35
 def load_current_data(self):
     code_list = glob.glob('../data/hdf/*.hdf')
     code_list = list(map(lambda x: x.split('.hdf')[0][-6:], code_list))
     X_test = []
     DATA = []
     first = True
     bar = ProgressBar(len(code_list), max_width=80)
     #for code in code_list:
     code_list_ret = []
     for i, code in enumerate(code_list):
         bar.numerator = i+1
         print("%s | %d" % (bar, len(X_test)), end='\r')
         sys.stdout.flush()
         df = pd.read_hdf('../data/hdf/%s.hdf'%code, 'day').sort_index()
         data = df.iloc[-self.frame_len:,:]
         if pd.to_numeric(data.loc[:, '현재가']).mean() * pd.to_numeric(data.loc[:, '거래량']).mean() < 1000000000:
             continue
         data = data.reset_index()
         for col in data.columns:
             try:
                 data.loc[:, col] = data.loc[:, col].str.replace('--', '-')
                 data.loc[:, col] = data.loc[:, col].str.replace('+', '')
             except AttributeError as e:
                 pass
         data.loc[:, 'month'] = data.loc[:, '일자']%10000/100
         data = data.drop(['일자', '체결강도'], axis=1)
         if len(data) < self.frame_len:
             continue
         try:
             data_t = self.scaler[code].transform(np.array(data))
         except (KeyError, ValueError):
             continue
         DATA.append(int(data.loc[len(data)-1, '현재가']))
         code_list_ret.append(code)
         X_test.extend(np.array(data_t))
     X_test = np.array(X_test).reshape(-1, 23*self.frame_len)
     print()
     assert len(X_test) == len(code_list_ret)
     assert len(X_test) == len(DATA)
     return X_test, code_list_ret, DATA
Example no. 36
    def fit(self, X_data, Y_data):
        # Add an op to initialize the variables.
        init_op = tf.global_variables_initializer()
        batch_size = 128

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        lr = 0.005
        loss_sum = 0
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if os.path.exists('%s/model.ckpt.meta' % self.model_dir):
                ckpt = tf.train.get_checkpoint_state(self.model_dir)
                saver = tf.train.Saver()
                saver.restore(sess, ckpt.model_checkpoint_path)
            for i in range(self.num_epoch):
                lr *= 0.9
                print("\nEpoch %d/%d is started" % (i+1, self.num_epoch), end='\n')
                bar = ProgressBar(len(X_data) // batch_size, max_width=80)
                for j in range(int(len(X_data)/batch_size)-1):
                    X_batch = X_data[batch_size*j:batch_size*(j+1)]
                    Y_batch = Y_data[batch_size*j:batch_size*(j+1)]
                    _ = sess.run(self.updateModel, feed_dict={self.lr:lr, self.scalarInput: X_batch, self.target: Y_batch})

                    if j%10 == 0:
                        loss = sess.run(self.loss, feed_dict={self.lr:lr, self.scalarInput: X_batch, self.target: Y_batch})
                        bar.numerator = j+1
                        loss_sum = ((j/10)*loss_sum + loss)/(j/10+1)
                        print("%s | loss: %f" % (bar, loss_sum), end='\r')
                        sys.stdout.flush()

            if not os.path.exists(self.model_dir):
                os.makedirs(self.model_dir)
            saver = tf.train.Saver()
            save_path = saver.save(sess, '%s/model.ckpt' % self.model_dir)
            print("Model saved in file: %s" % save_path)
Example no. 37
def test_defined_long():
    progress_bar = ProgressBar(20)

    assert '  0% ( 0/20) [             ] eta --:-- -' == str(progress_bar)
    assert '  0% ( 0/20) [             ] eta --:-- \\' == str(progress_bar)

    eta._NOW = lambda: 1411868722.0
    progress_bar.numerator = 1
    assert '  5% ( 1/20) [             ] eta --:-- |' == str(progress_bar)
    assert '  5% ( 1/20) [             ] eta --:-- /' == str(progress_bar)

    eta._NOW = lambda: 1411868722.5
    progress_bar.numerator = 2
    assert ' 10% ( 2/20) [#            ] eta 00:09 -' == str(progress_bar)

    eta._NOW = lambda: 1411868723.0
    progress_bar.numerator = 3
    assert ' 15% ( 3/20) [#            ] eta 00:09 \\' == str(progress_bar)

    eta._NOW = lambda: 1411868723.5
    progress_bar.numerator = 4
    assert ' 20% ( 4/20) [##           ] eta 00:08 |' == str(progress_bar)

    eta._NOW = lambda: 1411868724.0
    progress_bar.numerator = 5
    assert ' 25% ( 5/20) [###          ] eta 00:08 /' == str(progress_bar)

    eta._NOW = lambda: 1411868724.5
    progress_bar.numerator = 6
    assert ' 30% ( 6/20) [###          ] eta 00:07 -' == str(progress_bar)

    eta._NOW = lambda: 1411868725.0
    progress_bar.numerator = 7
    assert ' 35% ( 7/20) [####         ] eta 00:07 \\' == str(progress_bar)

    eta._NOW = lambda: 1411868725.5
    progress_bar.numerator = 8
    assert ' 40% ( 8/20) [#####        ] eta 00:06 |' == str(progress_bar)

    eta._NOW = lambda: 1411868726.0
    progress_bar.numerator = 9
    assert ' 45% ( 9/20) [#####        ] eta 00:06 /' == str(progress_bar)

    eta._NOW = lambda: 1411868726.5
    progress_bar.numerator = 10
    assert ' 50% (10/20) [######       ] eta 00:05 -' == str(progress_bar)

    eta._NOW = lambda: 1411868727.0
    progress_bar.numerator = 11
    assert ' 55% (11/20) [#######      ] eta 00:05 \\' == str(progress_bar)

    eta._NOW = lambda: 1411868727.5
    progress_bar.numerator = 12
    assert ' 60% (12/20) [#######      ] eta 00:04 |' == str(progress_bar)

    eta._NOW = lambda: 1411868728.0
    progress_bar.numerator = 13
    assert ' 65% (13/20) [########     ] eta 00:04 /' == str(progress_bar)

    eta._NOW = lambda: 1411868728.5
    progress_bar.numerator = 14
    assert ' 70% (14/20) [#########    ] eta 00:03 -' == str(progress_bar)

    eta._NOW = lambda: 1411868729.0
    progress_bar.numerator = 15
    assert ' 75% (15/20) [#########    ] eta 00:03 \\' == str(progress_bar)

    eta._NOW = lambda: 1411868729.5
    progress_bar.numerator = 16
    assert ' 80% (16/20) [##########   ] eta 00:02 |' == str(progress_bar)

    eta._NOW = lambda: 1411868730.0
    progress_bar.numerator = 17
    assert ' 85% (17/20) [###########  ] eta 00:02 /' == str(progress_bar)

    eta._NOW = lambda: 1411868730.5
    progress_bar.numerator = 18
    assert ' 90% (18/20) [###########  ] eta 00:01 -' == str(progress_bar)

    eta._NOW = lambda: 1411868731.0
    progress_bar.numerator = 19
    assert ' 95% (19/20) [############ ] eta 00:01 \\' == str(progress_bar)

    eta._NOW = lambda: 1411868731.5
    progress_bar.numerator = 20
    assert '100% (20/20) [#############] eta 00:00 |' == str(progress_bar)
# key: website, value: filenames
with open("sponsoredDict.p", "rb") as f:
	sponsoredDict = pickle.load(f)
with open("notsponsoredDict.p", "rb") as f:
	notsponsoredDict = pickle.load(f)
# key: filename, value: websites
with open("sampleDict.p", "rb") as f:
	sampleDict = pickle.load(f)

sponsoredDict_reordered = defaultdict(list)
notsponsoredDict_reordered = defaultdict(list)

for website, filenames in sponsoredDict.items():
	for f in filenames:
		sponsoredDict_reordered[f].append(website)
for website, filenames in notsponsoredDict.items():
	for f in filenames:
		notsponsoredDict_reordered[f].append(website)


bar = ProgressBar(len(train), max_width=40)
train_data = []
for index, row in train.iterrows():
	website_string = ' '.join(sponsoredDict_reordered[row['file']] + notsponsoredDict_reordered[row['file']])
	bar.numerator = index + 1
	print(bar, end='\r')
	sys.stdout.flush()
	train_data.append(website_string)
print()

bar = ProgressBar(len(sample), max_width=40)
sample_data = []
for index, row in sample.iterrows():
	website_string = ' '.join(sampleDict[row['file']])
	bar.numerator = index + 1
	print(bar, end='\r')
Example no. 39
0
from celery.result import ResultSet

from sklearn.ensemble import RandomForestClassifier
import pandas as pd
# https://pypi.python.org/pypi/etaprogress/
from etaprogress.progress import ProgressBar
# processFile is assumed to be a celery task imported from the project's
# task module (import not shown in this snippet).


print('--- Read training labels')
train = pd.read_csv('./data/train_v2.csv')
train_keys = dict([a[1] for a in train.iterrows()])  # each row iterates as (file, sponsored), so this maps filename -> label
test_files = set(pd.read_csv('./data/sampleSubmission_v2.csv').file.values)

print("--- Started processing")
result = ResultSet([])
bar = ProgressBar(len(train)+len(test_files), max_width=40)
#https://celery.readthedocs.org/en/latest/reference/celery.result.html#celery.result.ResultSet
for k, filename in enumerate(list(train['file'])+list(test_files)):
	if filename in train_keys:
		result.add(processFile.delay(filename, train_keys[filename]))
	elif filename != "":
		result.add(processFile.delay(filename, 2))
	
	#sponsored = train.loc[train['file'] == openfile]
	#if not sponsored.empty:
		#result.add(processFile.delay(openfile, data, int(sponsored['sponsored'])))
	#testing = sample.loc[sample['file'] == openfile]
	#if not testing.empty:
		#result.add(processFile.delay(openfile, data, int(sponsored['sponsored'])))
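
The loop above only enqueues work; per the celery ResultSet docs linked above, the caller can then block until every task finishes and gather the return values, e.g. (sketch):

outputs = result.join()  # blocks until all queued tasks complete
print("--- Collected %d results" % len(outputs))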

def addNodes(nodes):
	for n in nodes:
		if n not in q:
			q.append(n)

train = pd.read_csv("./data/train.csv", header=0, delimiter=",", quoting=3)
sample = pd.read_csv("./data/sampleSubmission.csv", header=0, delimiter=",", quoting=3)

print("Starting processing...")

q = []

for i, zipFile in enumerate(process_zips):
	archive = zipfile.ZipFile(zipFile, 'r')
	file_paths = zipfile.ZipFile.namelist(archive)
	bar = ProgressBar(len(file_paths), max_width=40)
	pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-1 or 1)
	for k, file_path in enumerate(file_paths):
		data = archive.read(file_path)
		openfile = file_path[2:] #filename
		sponsored = train.loc[train['file'] == openfile]
		if not sponsored.empty:
			pool.apply_async(parseFile, args = (data, openfile, int(sponsored['sponsored']), ), callback = addNodes)
		testing = sample.loc[sample['file'] == openfile]
		if not testing.empty:
			pool.apply_async(parseFile, args = (data, openfile, 2, ), callback = addNodes)

		bar.numerator = k + 1
		print("Folder:", i, bar, end='\r')
		sys.stdout.flush()
	pool.close()
	pool.join()  # make sure this archive's tasks finish before the next one
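
A minimal self-contained sketch of the apply_async + callback pattern used above, with a hypothetical worker standing in for parseFile:

import multiprocessing

def _worker(n):  # hypothetical stand-in for parseFile; returns a list of "nodes"
	return [n, n * n]

if __name__ == '__main__':
	collected = []
	pool = multiprocessing.Pool(processes=2)
	for n in range(10):
		# the callback runs in the parent process with the worker's return value
		pool.apply_async(_worker, args=(n,), callback=collected.extend)
	pool.close()
	pool.join()  # wait for all tasks before reading collected
	print(sorted(set(collected)))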
Example no. 41
0
import glob
import os
import sys

import pandas as pd
from redis import Redis
from rq import Queue
from etaprogress.progress import ProgressBar

from rq_task import processFile


print('--- Read training labels')
train = pd.read_csv('./data/train_v2.csv')
train_keys = dict([a[1] for a in train.iterrows()])  # each row iterates as (file, sponsored), so this maps filename -> label
test_files = set(pd.read_csv('./data/sampleSubmission_v2.csv').file.values)


filepaths = glob.glob('data/*/*.txt')

#random.shuffle(filepaths)
#filepaths = filepaths[0:1000]


bar = ProgressBar(len(filepaths), max_width=40)
print("--- Started processing")
redis_conn = Redis('192.168.1.140', 6379)
q = Queue("low", connection=redis_conn)
jobs = []
for i, filepath in enumerate(filepaths):
	bar.numerator = i + 1
	filename = os.path.basename(filepath)
	if filename in train_keys:
		jobs.append(q.enqueue_call(func=processFile, args=(filepath, train_keys[filename],), timeout=300))
	else:
		jobs.append(q.enqueue_call(func=processFile, args=(filepath, 2,), timeout=300))
	print("Adding jobs to queue", bar, end='\r')
	sys.stdout.flush()
print()
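
Once everything is enqueued, the jobs list can be polled until the workers drain the queue; a sketch assuming rq's Job.get_status() API (exact status strings vary slightly across rq versions):

import time

while any(job.get_status() not in ('finished', 'failed') for job in jobs):
	time.sleep(1)
print("All jobs finished or failed")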
		values['javascript_length'] += len(j)
		
	values['img_count'] = len(parsed.findAll('img'))
	
	return values	


filepaths = glob.glob('data/*/*.txt')

#random.shuffle(filepaths)
#filepaths = filepaths[0:1000]

num_tasks = len(filepaths)


bar = ProgressBar(num_tasks, max_width=40)
p = multiprocessing.Pool()
results = p.imap(create_data, filepaths)
print("--- Started processing")
# Track progress by polling the imap iterator's private _index attribute
# (an implementation detail of multiprocessing, not a public API).
while True:
	bar.numerator = results._index
	print(bar, end='\r')
	sys.stdout.flush()
	if results._index == num_tasks:
		break
	time.sleep(1)
p.close()
p.join()
print()

df_full = pd.DataFrame(list(results))
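
An alternative sketch that avoids reading the private results._index attribute: consume the imap iterator directly and advance the bar once per completed task (consume_with_progress is a hypothetical helper, reusing the ProgressBar/sys setup above):

def consume_with_progress(pool, func, items):
	bar = ProgressBar(len(items), max_width=40)
	rows = []
	for i, row in enumerate(pool.imap(func, items), 1):
		rows.append(row)
		bar.numerator = i
		print(bar, end='\r')
		sys.stdout.flush()
	print()
	return rows

# e.g.: df_full = pd.DataFrame(consume_with_progress(multiprocessing.Pool(), create_data, filepaths))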