Пример #1
0
    def newFigure(self):
        """Promote the previewed figure to the active one and queue a new preview.

        On the very first call there is no queued figure yet, so the active
        figure is created directly; afterwards the previously previewed
        ``nextFigure`` becomes the current ``figure``.
        """
        if self.nextFigure is None:  # PEP 8: compare to None with `is`
            self.figure = Figure((self.width // 2) - 2, 0)
        else:
            self.figure = self.nextFigure

        # Queue the next figure, spawned at the top-middle of the board.
        self.nextFigure = Figure((self.width // 2) - 2, 0)
Пример #2
0
 def new_figure(self):
     """Spawn a random figure from the remaining pool at column 3, row 0.

     Refills the pool from ``Figure.Figures`` once it is exhausted; if the
     freshly spawned figure already overlaps the stack, the game is over.
     """
     if not self.pass_Figures:
         # Pool exhausted -- start over with the full shape list.
         self.pass_Figures = Figure.Figures
     shape_choice = randint(0, len(self.pass_Figures) - 1)
     self.Figure = Figure(3, 0, shape_choice)
     self.figures.append(self.Figure)
     if self.intersects():
         self.state = "gameover"
Пример #3
0
 def __init__(self, parent):
     """Initialise an empty board plus timer, figure, and score counters."""
     super().__init__(parent)
     # 200 cells, all empty (0) -- presumably a 10x20 grid; TODO confirm.
     self.board = [0] * 200
     self.timer = QBasicTimer()
     self.setFocusPolicy(Qt.StrongFocus)
     self.figure = Figure()
     self.cube = 20  # cell size -- presumably in pixels; verify against paint code
     self.linesRemoved = 0
     self.scores = 0
Пример #4
0
    def Solve(self, problem):
        """Load all figures of *problem* and delegate to get_solution().

        Python 2 code (print statements).

        :param problem: object with ``name`` and a ``figures`` dict keyed by
            figure name ('A'..'H' plus answer-choice keys) -- presumably a
            RavensProblem; verify against the caller.
        :return: the selected answer, or -1 when none is chosen.
        """
        print problem.name
        # NOTE(review): problem_name and scores are assigned but unused in
        # this visible block (scores only appears in the disabled prints).
        problem_name = problem.name
        answer = -1
        scores = []

        #Load all figures and make them black OR white (no grey!)
        self.figure_a = Figure(problem.figures['A'].visualFilename)
        self.figure_b = Figure(problem.figures['B'].visualFilename)
        self.figure_c = Figure(problem.figures['C'].visualFilename)
        self.figure_d = Figure(problem.figures['D'].visualFilename)
        self.figure_e = Figure(problem.figures['E'].visualFilename)
        self.figure_f = Figure(problem.figures['F'].visualFilename)
        self.figure_g = Figure(problem.figures['G'].visualFilename)
        self.figure_h = Figure(problem.figures['H'].visualFilename)
        self.solutions = []
        problem_figure_keys = sorted(problem.figures.keys())
        num_solutions = 8
        # Take the first 8 sorted keys as the candidate solutions --
        # presumably the numbered answer figures sort before 'A'..'H';
        # TODO confirm against the data layout.
        for i in range(num_solutions):
            figure_sol = Figure(
                problem.figures[problem_figure_keys[i]].visualFilename)
            self.solutions.append(figure_sol)

        answer = self.get_solution()
        '''
        print problem.name
        print "Scores :", scores
        print "Correct answer: ", problem.correctAnswer
        print "Answer selected: ", answer, '\n'
        '''
        return answer
Пример #5
0
    def removeFigure(self, Figure):
        """Remove *Figure* from play and blank its board square.

        Does nothing when the piece is not registered or its position is
        off the board. NOTE: the parameter shadows the Figure class here.
        """
        # Guard clauses replace the original combined condition; evaluation
        # order (membership first, then board-position check) is preserved.
        if Figure not in self.getFigures():
            return
        if not self.is_the_piece_on_the_board_pos(Figure.pos):
            return
        self.figures.remove(Figure)
        self.board[Figure.getPos()[0]][Figure.getPos()[1]] = '_'
Пример #6
0
 def __init__(self, plotNum):
   """Initialise the base Figure with a plot-size-dependent font size.

   Relies on the module-level ``plotSize`` and ``style`` settings.
   Raises ValueError for an unrecognised ``plotSize``.
   """
   # Table lookup instead of an if/elif chain; same sizes as before.
   font_by_size = {"poster": 14, "presentation": 14, "manual": 10}
   if plotSize not in font_by_size:
     raise ValueError("Unknown plotSize '%s'." % plotSize)
   fontsize = font_by_size[plotSize]
   Figure.__init__(self, color=style, fontsize=fontsize)
   self.plotNum = plotNum
Пример #7
0
class Game:
    """Hangman game: owns the pygame window, the gallows figure and the
    text area, and runs the main event/draw loop."""

    def __init__(self):
        # set title for the game window
        pygame.display.set_caption("Hangman")

        # clock that sets the amount of frames per second
        self.fps_clock = pygame.time.Clock()

        self.settings = Settings()

        # create display surface on which everything is drawn
        self.DISPLAY_SURFACE = pygame.display.set_mode((self.settings.WINDOW_WIDTH, 
                                                    self.settings.WINDOW_HEIGHT))

        # create instance of figure class
        self.figure = Figure(self.DISPLAY_SURFACE)
        self.ta = TextArea(self.DISPLAY_SURFACE)

    def run(self):
        """Main loop: handle input, update state, redraw. Never returns --
        exits the process on a QUIT event."""
        while True:
            self.DISPLAY_SURFACE.fill(self.settings.BLACK)

            # check key and mouse events
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                elif event.type == KEYDOWN and self.ta.state == "active":
                    # key codes 97..122 are the lowercase letters a-z
                    if 97 <= event.key <= 122:
                        self.ta.check_letter(chr(event.key))
                        self.ta.check_game_state()
                        # figure mirrors the number of wrong guesses so far
                        self.figure.guesses = len(self.ta.guessed_wrong)
                elif event.type == KEYDOWN and self.ta.state != "active":
                    # round over: Enter or Space resets text area and figure
                    if event.key == K_RETURN or event.key == K_SPACE:
                        self.ta.reset_text()
                        self.figure.reset_figure()

            # Draw the screen matching the current game state.
            if self.ta.state == "active":
                self.ta.display_textarea()
            elif self.ta.state == "win":
                self.ta.display_win()
            elif self.ta.state == "lost":
                self.ta.display_lost()

            self.figure.draw_figure()
            
            pygame.display.update()
            self.fps_clock.tick(self.settings.FPS)
Пример #8
0
    def __init__(self):
        """Create the game window, frame clock, settings, and the figure and
        text-area helpers that draw on the shared display surface."""
        # set title for the game window
        pygame.display.set_caption("Hangman")

        # clock that sets the amount of frames per second
        self.fps_clock = pygame.time.Clock()

        self.settings = Settings()

        # create display surface on which everything is drawn
        self.DISPLAY_SURFACE = pygame.display.set_mode((self.settings.WINDOW_WIDTH, 
                                                    self.settings.WINDOW_HEIGHT))

        # create instance of figure class
        self.figure = Figure(self.DISPLAY_SURFACE)
        self.ta = TextArea(self.DISPLAY_SURFACE)
Пример #9
0
def generate_label_train_samples(c):
    """
    Generate label train sample (image patches).
    :param c:
    :return:
    """
    list_file = c.list_file
    target_folder = c.labels_folder

    print('generate_label_train_samples with list_file={0} and save to {1}'.format(list_file, target_folder))
    input("Press Enter to continue...")

    # Build one Figure per (whitespace-stripped) line of the list file.
    with open(list_file) as f:
        figures = [Figure(line.strip()) for line in f.readlines()]

    # Start from an empty output folder.
    if os.path.exists(target_folder):
        shutil.rmtree(target_folder)
    os.mkdir(target_folder)

    for figure in figures:
        print("Processing {0}".format(figure.id))
        figure.load_gt_annotation(which_annotation='label')
        figure.load_image()
        # Crop in grayscale unless the configuration asks for color.
        figure.crop_label_patches(is_gray=(c.color_type != cv2.IMREAD_COLOR))
        figure.save_label_patches(target_folder)
Пример #10
0
    def newTurn(self):
        """Lock the current figure in place, then try to spawn a new one.

        If any of the new figure's four cells is already occupied, the game
        is over; otherwise full lines are removed and scored.
        """
        newFigure = Figure()
        self.dropFigure()
        used = self._getUsedPlaces()
        # A figure is assumed to have exactly 4 cells (tetromino).
        for i in range(4):
            if tuple(newFigure.position[i]) in used:
                break
        else:
            # for/else: this branch runs only when no cell collided.
            self.figure = newFigure
            count = 0
            while self.removeFullLines():
                self.linesRemoved += 1
                count += 1

            # More simultaneous lines score disproportionately more.
            if count == 1:
                self.scoresMsg.emit(100)
            elif count == 2:
                self.scoresMsg.emit(300)
            elif count == 3:
                self.scoresMsg.emit(600)
            elif count == 4:
                self.scoresMsg.emit(1000)

            self.msg2statusbar.emit("Lines removed: " + str(self.linesRemoved))
            return
        # Reached only via the `break` above (collision on spawn).
        self.gameOver()
Пример #11
0
    def Solve(self, problem):
        """Load figures and candidate answers, then search for a solution.

        Python 2 code (print statements). Problems of sets B and C are
        skipped outright with the default answer -1.

        :param problem: object with ``name`` and a ``figures`` dict keyed by
            figure name -- presumably a RavensProblem; verify with caller.
        :return: selected answer, or -1 when skipped / not found.
        """
        print problem.name
        problem_name = problem.name
        answer = -1
        # NOTE(review): scores is never filled in this visible block.
        scores = []

        if 'Problem B' in problem_name or 'Problem C' in problem_name:
            print problem_name, 'skipped\n'
            return answer

        start_time = time.time()

        # Load all figures and make them black OR white (no grey!)
        print 'Loading figures for %s...' % problem_name
        self.figure_a = Figure(problem.figures['A'].visualFilename)
        self.figure_b = Figure(problem.figures['B'].visualFilename)
        self.figure_c = Figure(problem.figures['C'].visualFilename)
        self.figure_d = Figure(problem.figures['D'].visualFilename)
        self.figure_e = Figure(problem.figures['E'].visualFilename)
        self.figure_f = Figure(problem.figures['F'].visualFilename)
        self.figure_g = Figure(problem.figures['G'].visualFilename)
        self.figure_h = Figure(problem.figures['H'].visualFilename)

        self.figures = [self.figure_a, self.figure_b, self.figure_c,
                        self.figure_d, self.figure_e, self.figure_f,
                        self.figure_g, self.figure_h]

        print 'Identifying objects in each figure...'
        for figure in self.figures:
            figure.identify_objects()
            figure.find_centroids()

        print 'Loading all solutions and identifying the objects in each...'
        self.solutions = []
        problem_figure_keys = sorted(problem.figures.keys())
        num_solutions = 8
        # First 8 sorted keys are taken as the candidate answers --
        # presumably numbered keys sort before 'A'..'H'; TODO confirm.
        for i in range(num_solutions):
            figure_sol = Figure(problem.figures[problem_figure_keys[i]].visualFilename)
            figure_sol.identify_objects()
            figure_sol.find_centroids()
            self.solutions.append(figure_sol)
        
        print 'Searching for a solution...'
        answer = self.get_solution()

        print 'Time to find solution: ', time.time() - start_time, ' seconds\n'
        return answer
Пример #12
0
def main(_):
    """Split the panels of every figure listed in FLAGS.eval_path and save
    the resulting annotations to FLAGS.result_folder."""
    splitter = PanelSplitterObjDet()

    with open(FLAGS.eval_path) as f:
        lines = f.readlines()

    for idx, filepath in enumerate(lines):
        print(str(idx) + ': ' + filepath)
        figure = Figure(filepath.strip(), padding=0)
        figure.load_image()

        # Time the split of a single figure.
        st = time.time()
        panel_split(splitter, figure)
        print('Elapsed time = {}'.format(time.time() - st))

        #  save results
        figure.save_annotation(FLAGS.result_folder, 'panel')
Пример #13
0
    def __init__(self, name):
        """Set up a Tetris-like game widget: sounds, timers, animation and
        scoring state, plus the current and next figure.

        :param name: player/session name stored on the instance.
        """
        # Sound effects live next to this module in a 'sound' folder.
        app_dir = os.path.dirname(__file__)
        sound_folder = 'sound'
        sound_line = 'success.wav'
        sound_end = 'end.wav'

        # 'line' plays on a completed line, 'end' on game over.
        self.sounds = {
            'line': QSound(os.path.join(app_dir, sound_folder, sound_line)),
            'end': QSound(os.path.join(app_dir, sound_folder, sound_end))
        }

        self.board = []
        super().__init__()

        # Timer drives the game tick; speed is the tick interval
        # (presumably milliseconds -- verify against timer.start call).
        self.timer = QBasicTimer()
        self.speed = 390
        self.normal_speed = self.speed
        self.need_acceleration = False

        # Line-removal animation bookkeeping.
        self.need_animation = False
        self.animation_sleep = False
        self.animation_counter = 0

        self.lines_to_remove = []

        # Current-figure position and game progress state.
        self.need_new_figure = False
        self.cur_x = 0
        self.cur_y = 0
        self.score = 0
        self.is_started = False
        self.is_stopped = False

        self.figure_counter = -1
        self.level = 1

        self.setFocusPolicy(Qt.StrongFocus)
        self.clear()

        # Active figure and the previewed next figure.
        self.cur_block = Figure()
        self.next_block = Figure()

        self.result = []
        self.name = name
Пример #14
0
def init():
    """Populate the global cell positions and place paired figures.

    Fills ``positions`` with the pixel origin of every board cell, then
    replaces each odd placeholder value ``i`` on ``board`` with one figure
    of a freshly created pair (``i`` -> f1, ``i + 1`` -> f2).
    """
    for x in range(0, winwidth // segsize):
        for y in range(0, winheigth // segsize):
            positions.append((x * segsize, panel + y * segsize))

    for i in range(1, segnumber, 2):
        f1, f2 = Figure.createpair()
        # Replace the placeholders in place. The original insert-then-remove
        # pair had the same net effect but scanned the list twice per value.
        board[board.index(i)] = f1
        board[board.index(i + 1)] = f2
Пример #15
0
    def figure_subtract(self, fig1, fig2):
        """Return a new Figure: fig1's image with fig2's object pixels erased.

        Raises Exception when the two images differ in size.
        """
        if fig1.image.size != fig2.image.size:
            raise Exception('Figures must be same size to SUBTRACT them')

        result = copy.deepcopy(fig1.image)

        # Paint every pixel belonging to one of fig2's objects white (255).
        for obj in fig2.objects:
            for pixel in obj.area:
                result.putpixel(pixel, 255)

        return Figure(result)
Пример #16
0
    def figure3(self):
        """Build "figure3": a 1-row, 2-column figure for the ICE1..ICE6 cases.

        NOTE(review): this block appears truncated by the source extraction --
        the loop below only computes naming prefixes/suffixes; verify the
        complete method in the original file.
        """
        # create figure object (sizes in centimetres converted to inches)
        fig = Figure(self.figurefolder,"figure3", ncols = 2, nrows = 1, figsize = [12/2.54, 7/2.54],bottom = 0.18, top=0.8)

        simulationList = []
        for i in range(1,7):
            case = str(i)
            pretext = "ICE" + case
            # Cases 5 and 6 get an "8h" suffix -- presumably 8-hour runs.
            if case in ["5", "6"]:
                posttext = "8h"
Пример #17
0
    def __init__(self, problem):
        """Wrap a problem object: index its figures by name and gather the
        global object set. Python 2 code (dict.itervalues)."""
        self.verbose = True
        self.name = problem.name
        self.problemType = problem.problemType

        # Index every raw figure by its name.
        self.figures = {}
        for raw_figure in problem.figures.itervalues():
            self.figures[raw_figure.name] = Figure(raw_figure, self.name)

        self.hasVisual = problem.hasVisual
        self.hasVerbal = problem.hasVerbal

        # Collect objects shared across figures, then remember how many.
        self.global_objects = {}
        self.set_global_objects()
        self.global_objects_count = len(self.global_objects)
Пример #18
0
    def figure_xor(self, fig1, fig2):
        """Return a Figure that is black exactly where fig1 and fig2 differ.

        Raises Exception when the two images differ in size.
        """
        im1 = fig1.image
        im2 = fig2.image
        if im1.size != im2.size:
            raise Exception('Images must be same size to XOR them')

        width, height = im1.size
        # Start from an all-white ('L' mode) canvas and darken differences.
        out = Image.new('L', im1.size, color=255)
        for x in range(width):
            for y in range(height):
                if im1.getpixel((x, y)) != im2.getpixel((x, y)):
                    out.putpixel((x, y), 0)
        return Figure(out)
Пример #19
0
    def figure_add(self, fig1, fig2):
        """Return a Figure containing the union of both figures' dark objects.

        Raises:
            Exception: if the two source images differ in size.
        """
        if fig1.image.size != fig2.image.size:
            # Bug fix: the message previously said SUBTRACT -- a copy-paste
            # leftover from figure_subtract.
            raise Exception('Figures must be same size to ADD them')

        size = fig1.image.size
        image = Image.new('L', size, color=255)

        # Stamp the object pixels of both figures onto the white canvas.
        for fig in (fig1, fig2):
            for obj in fig.objects:
                for xy in obj.area:
                    image.putpixel(xy, 0)

        return Figure(image)
Пример #20
0
def generate_statistics(c):
    """
    Generate label statistics for deciding some parameters of the algorithm
    :param c: configuration object providing ``list_file``.
    :return: None (statistics are printed to the console).
    """
    list_file = c.list_file

    print('generate_statistics with list_file={0}'.format(list_file))
    input("Press Enter to continue...")

    with open(list_file) as f:
        lines = f.readlines()

    # Remove whitespace characters, and then construct the figures
    figures = [Figure(line.strip()) for line in lines]

    # Load annotation and image for every figure, with a console progress bar.
    print_progress_bar(0, len(figures), prefix='Progress:', suffix='Complete', length=50)
    for i, figure in enumerate(figures):
        figure.load_gt_annotation(which_annotation='label')
        figure.load_image()
        print_progress_bar(i + 1, len(figures), prefix='Progress:', suffix='Complete', length=50)

    # Figure Image Statistics
    image_width, image_height = [], []
    for figure in figures:
        # shape[:2] unpacks as (height, width) here
        height, width = figure.image_orig.shape[:2]
        image_width.append(width)
        image_height.append(height)

    print('\nimage width statistics:')
    print(pd.Series(image_width).describe())
    print('\nimage height statistics:')
    print(pd.Series(image_height).describe())

    # Label Statistics
    label_width, label_height = [], []
    for figure in figures:
        for panel in figure.panels:
            # label_rect's last two entries are treated as (width, height)
            width, height = panel.label_rect[2:]
            label_width.append(width)
            label_height.append(height)

    print('\nLabel width statistics:')
    print(pd.Series(label_width).describe())
    print('\nLabel height statistics:')
    print(pd.Series(label_height).describe())
Пример #21
0
def test_rpn_hog():
    """
    HOG+RPN for panel label recognition
    :return: None (detection results are written to the result folder).
    """
    # Command-line options: data paths, model paths and RPN batch size.
    parser = OptionParser()

    parser.add_option("-p", "--path", dest="test_path", help="Path to test data.",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval.txt')
    parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
                      help="Number of ROIs per iteration. Higher means more memory use.", default=32)
    parser.add_option("--config_filename", dest="config_filename",
                      help="Location to read the metadata related to the training (generated when training).",
                      default="config.pickle")
    parser.add_option("--network", dest="network", help="Base network to use. Supports nn_cnn_3_layer.",
                      default='nn_cnn_3_layer')
    parser.add_option("--rpn_weight_path", dest="rpn_weight_path",  default='./model_rpn.hdf5')
                      # default='/Users/jie/projects/PanelSeg/ExpPython/models/label+bg_rpn_3_layer_color-0.0374.hdf5')
    parser.add_option("--classify_model_path", dest="classify_model_path",
                      default='/Users/jie/projects/PanelSeg/ExpPython/models/label50+bg_cnn_3_layer_color-0.9910.h5')
    parser.add_option("--result_folder", dest="result_folder",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval/rpn_hog/rpn_cpu_0.116')

    (options, args) = parser.parse_args()

    if not options.test_path:  # if filename is not given
        parser.error('Error: path to test data must be specified. Pass --path to command line')

    # Build configuration, RPN/classifier models and the HOG descriptor.
    rpn_c, rpn_model_rpn, rpn_model_classifier, hog = rpn_hog_initialize(options)

    with open(options.test_path) as f:
        lines = f.readlines()

    # Run detection on every listed image, one file path per line.
    for idx, filepath in enumerate(lines):
        print(str(idx) + ': ' + filepath)
        # if 'PMC3664797_gkt198f2p' not in filepath:
        #     continue
        # if idx < 243:
        #     continue
        filepath = filepath.strip()
        figure = Figure(filepath)
        figure.load_image()

        st = time.time()

        figure.fg_rois, figure.fg_scores, figure.fg_labels = rpn_hog_detect(figure,
                                                                            rpn_c, rpn_model_rpn, rpn_model_classifier,
                                                                            hog, RPN_ONLY)

        print('Elapsed time = {}'.format(time.time() - st))

        # Save detection results
        figure.save_annotation(options.result_folder)
Пример #22
0
def generate_nonlabel_train_samples(c):
    """Sample random square patches from each figure as negative (non-label)
    training examples and write them to ``c.nonlabels_folder``.

    :param c: configuration with ``list_file``, ``nonlabels_folder`` and
        ``color_type``.
    :return: None (patches are written as PNG files).
    """
    list_file = c.list_file
    target_folder = c.nonlabels_folder

    print('generate_nonlabel_train_samples with list_file={0} and target_folder={1}'.format(list_file, target_folder))
    input("Press Enter to continue...")

    with open(list_file) as f:
        lines = f.readlines()

    # Remove whitespace characters, and then construct the figures
    figures = [Figure(line.strip()) for line in lines]

    # Clear the folder
    if not os.path.exists(target_folder):
        os.mkdir(target_folder)

    for figure in figures:
        print("Processing {0}".format(figure.id))
        figure.load_image()
        image_height, image_width = figure.image_orig.shape[:2]
        # Draw up to 50 random squares: position may start slightly (5px)
        # outside the image; side length is Gaussian around 20 (sigma 7).
        for i in range(50):
            x, y, s = random.randint(-5, image_width-5), random.randint(-5, image_height-5), round(random.gauss(20, 7))
            if (s < 5) or (s > 80):
                continue
            # Reject patches that stick out of the image by more than half
            # their side on either axis.
            if (x + s - image_width > s / 2) or (0-x > s/2):
                continue
            if (y + s - image_height > s / 2) or (0-y > s/2):
                continue

            rect = (x, y, s, s)
            # NOTE(review): no separator between figure.id and the joined
            # rect string -- confirm this filename format is intended.
            patch_file = figure.id + "_".join(str(x) for x in rect) + ".png"

            # Shift into the padded image's coordinate system before cropping.
            x += figure.PADDING
            y += figure.PADDING
            if c.color_type == cv2.IMREAD_COLOR:
                patch = figure.image[y:y+s, x:x+s]
            else:
                patch = figure.image_gray[y:y+s, x:x+s]

            # if patch.shape

            patch_file = os.path.join(target_folder, patch_file)
            cv2.imwrite(patch_file, patch)
Пример #23
0
def read_samples(path, auto_folder, model_classifier, model_svm):
    """Build per-figure feature matrices X and class-label vectors Y by
    matching automatic detections against ground-truth annotations.

    :param path: list file with one figure path per line.
    :param auto_folder: folder holding the automatic annotations.
    :param model_classifier: unused in this visible block (kept for the
        disabled SVM re-scoring path below) -- verify with callers.
    :param model_svm: likewise only referenced from commented-out code.
    :return: (X, Y) -- lists with one entry per figure.
    """
    with open(path) as f:
        lines = f.readlines()
    # Remove whitespace characters, and then construct the figures
    figures = [Figure(line.strip()) for line in lines]

    X = []
    Y = []
    for i, figure in enumerate(figures):
        figure.load_image()

        # load ground-truth annotation
        gt_rois, gt_labels = load_ground_truth_annotation(figure)

        # load auto annotation
        auto_rois = load_auto_annotation(figure, auto_folder)

        # sort auto annotation with respect to distances to left-up corner (0, 0)
        # (x + y is used as the distance proxy)
        distances = [roi[0] + roi[1] for roi in auto_rois]
        indexes = np.argsort(distances)
        auto_rois = auto_rois[indexes]

        # match auto to gt to assign y
        y = np.full([auto_rois.shape[0]],
                    len(LABEL_CLASS_MAPPING))  # initialize as non-label
        # Best-IoU matching: a detection takes a ground-truth class only
        # when its IoU with that ground-truth box exceeds 0.25.
        for gt_i, gt_roi in enumerate(gt_rois):
            ious = [iou_rect(auto_roi, gt_roi) for auto_roi in auto_rois]
            max_index = np.argmax(ious)
            if ious[max_index] > 0.25:
                y[max_index] = LABEL_CLASS_MAPPING[map_label(gt_labels[gt_i])]
        Y.append(y)

        # extract features
        x = feature_extraction(figure, auto_rois)

        # if len(x) > 0:
        #     p_label, p_acc, p_val = svmutil.svm_predict(y, x.tolist(), model_svm, '-b 1')
        #     x = np.array(p_val)

        X.append(x)

    return X, Y
Пример #24
0
def main(_):
    """Run panel splitting over the evaluation list and store one 'panel'
    annotation per figure in FLAGS.result_folder."""
    splitter = PanelSplitterObjDet()

    with open(FLAGS.eval_path) as f:
        lines = f.readlines()

    for idx, raw_path in enumerate(lines):
        print(str(idx) + ': ' + raw_path)
        filepath = raw_path.strip()
        figure = Figure(filepath, padding=0)
        figure.load_image()

        started = time.time()
        panel_split(splitter, figure)
        print('Elapsed time = {}'.format(time.time() - started))

        # persist the detected panels
        figure.save_annotation(FLAGS.result_folder, 'panel')
Пример #25
0
    def addFigure(self, Figure):
        """Try to place *Figure* on the board (the parameter shadows the
        Figure class inside this method).

        Returns the figure on success, None when an identical piece (same
        pos and short name) already exists or the position is off the board.
        Kings ('K'/'k') get their board cell refreshed even when rejected
        as duplicates.
        """
        for figure in self.figures:
            if figure.pos == Figure.pos and figure.short == Figure.short:
                # duplicate piece -- add failed (debug print removed)
                if Figure.short == 'K' or Figure.short == 'k':
                    # Keep the king's symbol visible on the board even though
                    # the piece itself is not added again.
                    if self.board[Figure.getPos()[0]][Figure.getPos()
                                                      [1]] != Figure.short:
                        self.board[Figure.getPos()[0]][Figure.getPos()
                                                       [1]] = Figure.short

                return None

        if self.is_the_piece_on_the_board_pos(Figure.pos):
            self.board[Figure.getPos()[0]][Figure.getPos()[1]] = Figure.short
            self.figures.append(Figure)
            # add succeeded (debug print removed)

            return Figure
        return None
Пример #26
0
    def create_figure(self, x1, y1, x2, y2, color):
        """Create a new Figure on this canvas and register it in self.figures."""
        new_fig = Figure(x1, y1, x2, y2, color, color, "figure", self.canvas)
        self.figures.append(new_fig)
Пример #27
0
    return Figure(image)

def figure_subtract(fig1, fig2):
    """Return a Figure equal to fig1 with fig2's object pixels erased.

    Raises Exception when the two images differ in size.
    """
    if fig1.image.size != fig2.image.size:
        raise Exception('Figures must be same size to SUBTRACT them')

    result = copy.deepcopy(fig1.image)

    # Whiten (255) every pixel covered by one of fig2's objects.
    for obj in fig2.objects:
        for pixel in obj.area:
            result.putpixel(pixel, 255)

    return Figure(result)

# Ad-hoc driver (Python 2): load the two E-10 test figures and run every
# binary figure operation on them.
fig_e10_a = Figure("E-10-A.png")
fig_e10_b = Figure("E-10-B.png")

print 'Identifying objects...'
fig_e10_a.identify_objects()
fig_e10_b.identify_objects()

print 'Doing the maths...'
# Return values are discarded -- presumably the functions save or display
# their result images as a side effect; verify their definitions.
figure_and(fig_e10_a, fig_e10_b)
figure_xor(fig_e10_a, fig_e10_b)
figure_add(fig_e10_b, fig_e10_a)
figure_subtract(fig_e10_b, fig_e10_a)

print 'Done\n'

Пример #28
0
def augment(img_data, config, augment=True):
    """Randomly flip/rotate an image and remap its bounding boxes to match.

    :param img_data: dict with 'filepath', 'bboxes', 'width', 'height'.
    :param config: provides use_horizontal_flips / use_vertical_flips /
        rot_90 switches.
    :param augment: when False, only the image is loaded; no transform.
    :return: (deep-copied img_data with updated bboxes/size, image array).
    """
    assert 'filepath' in img_data
    assert 'bboxes' in img_data
    assert 'width' in img_data
    assert 'height' in img_data

    # Deep copy so the caller's annotation dict is never mutated.
    img_data_aug = copy.deepcopy(img_data)

    figure = Figure(img_data_aug['filepath'])
    figure.load_image()
    # img = cv2.imread(img_data_aug['filepath'])
    img = figure.image

    if augment:
        rows, cols = img.shape[:2]

        # Each flip fires with probability 1/2 when enabled.
        if config.use_horizontal_flips and np.random.randint(0, 2) == 0:
            img = cv2.flip(img, 1)
            # Mirror x-coordinates; x1/x2 swap roles to keep x1 <= x2.
            for bbox in img_data_aug['bboxes']:
                x1 = bbox['x1']
                x2 = bbox['x2']
                bbox['x2'] = cols - x1
                bbox['x1'] = cols - x2

        if config.use_vertical_flips and np.random.randint(0, 2) == 0:
            img = cv2.flip(img, 0)
            for bbox in img_data_aug['bboxes']:
                y1 = bbox['y1']
                y2 = bbox['y2']
                bbox['y2'] = rows - y1
                bbox['y1'] = rows - y2

        if config.rot_90:
            # Pick one of the four right-angle rotations uniformly; each is
            # implemented as an axis transpose plus a flip.
            angle = np.random.choice([0,90,180,270],1)[0]
            if angle == 270:
                img = np.transpose(img, (1,0,2))
                img = cv2.flip(img, 0)
            elif angle == 180:
                img = cv2.flip(img, -1)
            elif angle == 90:
                img = np.transpose(img, (1,0,2))
                img = cv2.flip(img, 1)
            elif angle == 0:
                pass

            # Remap each bbox with the same coordinate transform applied to
            # the pixels above (note: rows/cols are the PRE-rotation sizes).
            for bbox in img_data_aug['bboxes']:
                x1 = bbox['x1']
                x2 = bbox['x2']
                y1 = bbox['y1']
                y2 = bbox['y2']
                if angle == 270:
                    bbox['x1'] = y1
                    bbox['x2'] = y2
                    bbox['y1'] = cols - x2
                    bbox['y2'] = cols - x1
                elif angle == 180:
                    bbox['x2'] = cols - x1
                    bbox['x1'] = cols - x2
                    bbox['y2'] = rows - y1
                    bbox['y1'] = rows - y2
                elif angle == 90:
                    bbox['x1'] = rows - y2
                    bbox['x2'] = rows - y1
                    bbox['y1'] = x1
                    bbox['y2'] = x2
                elif angle == 0:
                    pass

    # Record the (possibly rotated) image's final dimensions.
    img_data_aug['width'] = img.shape[1]
    img_data_aug['height'] = img.shape[0]
    return img_data_aug, img
Пример #29
0
    def get_solution(self):
        """Search the candidate solutions for the best answer.

        Strategy (in order): (1) pick the candidate whose merged holistic
        image is most vertically/horizontally symmetric (above threshold);
        (2) otherwise infer an A->B->C transform and apply it to figure H;
        (3) for 'horizontal pass through', simulate objects sliding toward
        each other. Python 2 code (uses xrange).

        :return: 1-based answer index, or -1 when nothing matches.
        """
        answer = -1

        # *** REAL CODE ***
        # Check for holistic symmetry
        vertical_symmetry_measures = []
        horizontal_symmetry_measures = []
        for solution in self.solutions:
            # create_merged_image() reads self.figure_sol, so set it first.
            self.figure_sol = solution
            holistic_image = self.create_merged_image()
            vertical_symmetry_measures.append(
                self.get_vertical_symmetry_measure(holistic_image))
            horizontal_symmetry_measures.append(
                self.get_horizontal_symmetry_measure(holistic_image))
        # Check vertical (answers are 1-based, hence the +1)
        max_measure = max(vertical_symmetry_measures)
        if max_measure > self.threshold:
            return vertical_symmetry_measures.index(max_measure) + 1
        # Check horizontal
        max_measure = max(horizontal_symmetry_measures)
        if max_measure > self.threshold:
            return horizontal_symmetry_measures.index(max_measure) + 1

        # Horizontal transforms alone have been sufficient for the practice problems encountered
        transform = self.get_transform(self.figure_a, self.figure_b,
                                       self.figure_c)

        # These values used later on
        self.figure_g.identify_objects()
        self.figure_g.find_centroids()
        self.figure_h.identify_objects()
        self.figure_h.find_centroids()

        if transform[0] == 'resize':
            # if at this point, only 2 objects in figure
            # Get size of object in question and get scale factor from transform data
            obj = self.figure_h.objects[1]
            width_obj, height_obj = obj.size()
            width_trans, height_trans = transform[1]
            scale = (1 + width_trans / float(width_obj),
                     1 + height_trans / float(height_obj))

            # Resize figure H by the inferred scale, then crop back to the
            # original size around the center.
            im = self.figure_h.image
            width, height = im.size
            im_resized = im.resize(
                (int(width * scale[0]), int(height * scale[1])),
                Image.BILINEAR)
            width_new, height_new = im_resized.size
            width_diff = width_new - width
            height_diff = height_new - height
            box = (width_diff / 2, height_diff / 2, width_new - width_diff / 2,
                   height_new - height_diff / 2)
            fig = Figure(im_resized.crop(box))
            answer = self.find_most_similar_solution(fig)

        elif transform[0] == 'add and translate':
            translate_distance = transform[1]
            obj1 = self.figure_g.objects[1]
            size = self.figure_g.image.size

            init_l_val = 255
            # Fresh empty objects: constructed with a dummy pixel that is
            # immediately removed.
            obj1_new = Object((0, 0), 0)
            obj2_new = Object((0, 0), 0)
            obj1_new.remove_pixel((0, 0))
            obj2_new.remove_pixel((0, 0))

            # Slide first two objects away from each other
            for coord in obj1.area:
                obj1_new.add_pixel(
                    (coord[0] + translate_distance * 2, coord[1]))
                obj2_new.add_pixel(
                    (coord[0] - translate_distance * 2, coord[1]))

            # Make new image with translated objects
            image_new = Image.new('L', size, color=init_l_val)
            for xy in obj1_new.area:
                image_new.putpixel(xy, 0)
            for xy in obj2_new.area:
                image_new.putpixel(xy, 0)
            for xy in obj1.area:
                image_new.putpixel(xy, 0)

            fig = Figure(image_new)
            answer = self.find_most_similar_solution(fig)

        elif transform[0] == 'horizontal pass through':
            for solution in self.solutions:
                solution.identify_objects()
                # Need at least two dark (l_val < 128) objects to pass through
                # each other.
                num_dark_obj = 0
                for obj in solution.objects:
                    if obj.l_val < 128:
                        num_dark_obj += 1
                if num_dark_obj < 2:
                    continue

                size = solution.image.size
                im_centroid = (size[0] / 2, size[1] / 2)
                max_distance = size[0] / 2
                # Slide dark objects toward the center in steps of 2 px; a
                # match is found when the translated composition reproduces
                # the candidate image.
                for i in xrange(2, max_distance, 2):
                    objects_new = []
                    for obj in solution.objects:
                        if obj.l_val < 128:
                            # On the left side
                            if obj.centroid[0] < im_centroid[0]:
                                obj_new = self.translate_object(obj, (i, 0))
                            # On the right side
                            else:
                                obj_new = self.translate_object(obj, (-i, 0))

                            objects_new.append(obj_new)
                    image_new = self.image_from_objects(size, objects_new)
                    if self.is_equal(image_new, solution.image):
                        answer = self.solutions.index(solution) + 1
                        return answer

        return answer
Пример #30
0
def test_lstm():
    """Run LSTM-based label-sequence decoding over the eval set.

    For each figure listed in the eval file: load the image, load the
    RPN-produced label-candidate ROIs, sort them by distance to the
    top-left corner (reading order), extract features, run the LSTM
    model, and decode the best label sequence with beam search.  Results
    are written as annotation files into ``options.result_folder``.

    Improvements over the previous revision: removed dead commented-out
    SVM/classifier code and the unused ``model_svm`` local it left behind.
    """
    parser = OptionParser()

    parser.add_option("--eval_path", dest="eval_path", help="Path to eval data.",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval.txt')
    parser.add_option("--eval_auto_folder", dest="eval_auto_folder",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval/rpn_hog/rpn')
    # Kept for CLI compatibility although the classifier path is currently unused.
    parser.add_option("--classify_model_path", dest="classify_model_path",
                      default='/Users/jie/projects/PanelSeg/ExpPython/models/label50+bg_cnn_3_layer_color-0.9910.h5')
    parser.add_option("--lstm_model_path", dest="lstm_model_path",
                      default='/Users/jie/projects/PanelSeg/ExpPython/models/lstm_model_train_0.25eval_epoch_9.h5')
    parser.add_option("--result_folder", dest="result_folder",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval/rpn_hog_lstm/eval')

    options, _ = parser.parse_args()

    model_lstm = load_model(options.lstm_model_path)
    model_lstm.summary()

    with open(options.eval_path) as f:
        lines = f.readlines()

    for idx, filepath in enumerate(lines):
        print(str(idx) + ': ' + filepath)
        filepath = filepath.strip()
        figure = Figure(filepath)
        figure.load_image()
        st = time.time()

        # Detection candidates produced earlier by the RPN stage.
        rois = load_auto_annotation(figure, options.eval_auto_folder)

        # Sort ROIs by Manhattan distance to the top-left corner (0, 0) so
        # the LSTM sees the labels in reading order.
        distances = [roi[0] + roi[1] for roi in rois]
        rois = rois[np.argsort(distances)]

        x = feature_extraction(figure, rois)

        if rois.size == 0:
            figure.fg_rois, figure.fg_scores, figure.fg_labels = None, None, None
        else:
            _x = np.expand_dims(x, axis=0)  # add a batch dimension
            y_hat = model_lstm.predict(_x)

            # Beam search (width 5) over per-step label probabilities,
            # allowing a "negative" (no-label) state for each ROI.
            figure.fg_rois, figure.fg_scores, figure.fg_labels = beam_search_with_neg(rois, y_hat[0], 5)

        print('Elapsed time = {}'.format(time.time() - st))

        # Save detection results
        figure.save_annotation(options.result_folder)
Example #31
0
def plot4Sets(caseCollection,
              simulationCollection,
              annotationCollection,
              simulationDataFrames,
              figurefolder,
              figurename,
              ncVariable,
              designVariable,
              conversionNC=1.0,
              conversionDesign=1.0,
              xmax=1000,
              ymax=1000,
              xAxisLabel=None,
              xAxisUnit=None,
              yAxisLabel=None,
              yAxisUnit=None,
              keisseja=10000,
              yPositionCorrection=100,
              outlierParameter=0.2):
    """Scatter-plot the designed (initial) value of *designVariable* against
    the simulated value of *ncVariable* for every emulator run of every case,
    on a 2x2 grid of shared axes (one subplot per case).

    The simulated value is the mean of *ncVariable* over the 2.5-3.5 h time
    window; both axes are unit-converted via *conversionDesign* /
    *conversionNC*.  Points whose relative change (simulated / designed)
    deviates from 1 by more than *outlierParameter* are highlighted in red
    (overshoot) or blue (undershoot).

    NOTE(review): ``relativeChangeDict`` and the running ``maks``/``mini``
    extrema are filled but never consumed in the visible part of this
    function, and the figure is neither saved nor returned here --
    presumably handled further down; confirm against the full source.
    ``annotationCollection``, ``xmax``/``ymax``, the axis label/unit
    parameters and ``yPositionCorrection`` are likewise unused in view.
    """

    relativeChangeDict = Data.emptyDictionaryWithKeys(caseCollection)
    print(" ")
    print(figurename)
    # create figure object
    fig = Figure(figurefolder,
                 figurename,
                 ncols=2,
                 nrows=2,
                 sharex=True,
                 sharey=True)
    # plot timeseries with unit conversion
    maks = 0
    mini = 0
    for ind, case in enumerate(caseCollection):
        # keisseja ("cases" in Finnish) caps how many emulator runs are plotted
        for emul in list(simulationCollection[case])[:keisseja]:
            dataset = simulationCollection[case][emul].getTSDataset()
            muuttuja = dataset[ncVariable]  # muuttuja = "variable" (Finnish)

            # designed/initial value, read from the design DataFrame
            alku = simulationDataFrames[case].loc[emul][
                designVariable] * conversionDesign
            # simulated value: time-mean over the 2.5-3.5 h window
            loppu = muuttuja.sel(
                time=slice(2.5, 3.5)).mean().values * conversionNC
            relChangeParam = loppu / alku

            relativeChangeDict[case][emul] = relChangeParam

            # Colour outliers; higher zorder draws them on top of the
            # unremarkable (white) points.
            if relChangeParam > 1 + outlierParameter:
                color = Colorful.getDistinctColorList("red")
                zorderParam = 10
            elif relChangeParam < 1 - outlierParameter:
                color = Colorful.getDistinctColorList("blue")
                zorderParam = 9
            else:
                color = "white"
                zorderParam = 6

            # Track global extrema of the relative change (mini starts at 0,
            # so it never rises above 0 -- looks suspicious; verify intent).
            maks = max(relChangeParam, maks)
            mini = min(relChangeParam, mini)

            fig.getAxes(True)[ind].plot(alku,
                                        loppu,
                                        marker="o",
                                        markerfacecolor=color,
                                        markeredgecolor="black",
                                        markeredgewidth=0.2,
                                        markersize=6,
                                        alpha=0.5,
                                        zorder=zorderParam)
Example #32
0
                                                    yPositionCorrection=300)
        simulationDataFrames = mergeDataFrameWithParam(simulationDataFrames,
                                                       cloudTopParameters,
                                                       "zcRel")
        cloudTopFig.save()

    if cloudTopOutliersFlag:
        cloutTopOutliers = {}
        for ind, case in enumerate(list(simulationDataFrames)):
            cloudTopOutliers[case] = simulationDataFrames[case].where(
                simulationDataFrames[case]["zcRel"] <
                simulationDataFrames[case]["zcRel"])

        fig2 = Figure(figurefolder,
                      "cloudtopOutliers",
                      ncols=2,
                      nrows=2,
                      sharex=True,
                      sharey=True)
        # plot timeseries with unit conversion
        cloudTopOutliersColors = Colorful.getIndyColorList(
            len(cloudTopOutliers))
        for ind, case in enumerate(caseCollection):
            for emulInd, emul in enumerate(cloudTopOutliers):
                try:
                    simulation = simulationCollection[case][emul]
                except KeyError:
                    continue
                dataset = simulation.getTSDataset()
                muuttuja = dataset["zc"][1:] / (
                    simulationDataFrames[case].loc[emul]["pblh_m"])
Example #33
0
def test_lstm():
    """Run LSTM-based label-sequence decoding over the eval set.

    For each figure listed in the eval file: load the image, load the
    RPN-produced label-candidate ROIs, sort them by distance to the
    top-left corner (reading order), extract features, run the LSTM
    model, and decode the best label sequence with beam search.  Results
    are written as annotation files into ``options.result_folder``.

    Improvements over the previous revision: removed dead commented-out
    SVM/classifier code and the unused ``model_svm`` local it left behind.
    """
    parser = OptionParser()

    parser.add_option(
        "--eval_path",
        dest="eval_path",
        help="Path to eval data.",
        default='/Users/jie/projects/PanelSeg/ExpPython/eval.txt')
    parser.add_option(
        "--eval_auto_folder",
        dest="eval_auto_folder",
        default='/Users/jie/projects/PanelSeg/ExpPython/eval/rpn_hog/rpn')
    # Kept for CLI compatibility although the classifier path is currently unused.
    parser.add_option(
        "--classify_model_path",
        dest="classify_model_path",
        default=
        '/Users/jie/projects/PanelSeg/ExpPython/models/label50+bg_cnn_3_layer_color-0.9910.h5'
    )
    parser.add_option(
        "--lstm_model_path",
        dest="lstm_model_path",
        default=
        '/Users/jie/projects/PanelSeg/ExpPython/models/lstm_model_train_0.25eval_epoch_9.h5'
    )
    parser.add_option(
        "--result_folder",
        dest="result_folder",
        default='/Users/jie/projects/PanelSeg/ExpPython/eval/rpn_hog_lstm/eval'
    )

    options, _ = parser.parse_args()

    model_lstm = load_model(options.lstm_model_path)
    model_lstm.summary()

    with open(options.eval_path) as f:
        lines = f.readlines()

    for idx, filepath in enumerate(lines):
        print(str(idx) + ': ' + filepath)
        filepath = filepath.strip()
        figure = Figure(filepath)
        figure.load_image()
        st = time.time()

        # Detection candidates produced earlier by the RPN stage.
        rois = load_auto_annotation(figure, options.eval_auto_folder)

        # Sort ROIs by Manhattan distance to the top-left corner (0, 0) so
        # the LSTM sees the labels in reading order.
        distances = [roi[0] + roi[1] for roi in rois]
        rois = rois[np.argsort(distances)]

        x = feature_extraction(figure, rois)

        if rois.size == 0:
            figure.fg_rois, figure.fg_scores, figure.fg_labels = None, None, None
        else:
            _x = np.expand_dims(x, axis=0)  # add a batch dimension
            y_hat = model_lstm.predict(_x)

            # Beam search (width 5) over per-step label probabilities,
            # allowing a "negative" (no-label) state for each ROI.
            figure.fg_rois, figure.fg_scores, figure.fg_labels = beam_search_with_neg(
                rois, y_hat[0], 5)

        print('Elapsed time = {}'.format(time.time() - st))

        # Save detection results
        figure.save_annotation(options.result_folder)
Example #34
0
    def getIce4Simulations(self):
        """Accessor for the ICE4 simulation collection."""
        ice4 = self.ice4simulations
        return ice4

    def getAllSimulations(self):
        """Accessor for the complete simulation collection."""
        everything = self.allSimulations
        return everything

    def getUCLALESSALSASimulations(self):
        """Accessor for the UCLALES-SALSA simulation collection."""
        uclales = self.uclalesSimulations
        return uclales


# Build the manuscript-figure helper from the simulation-data CSV
# (presumably radiation-related, given the "Rad" naming -- confirm).
# The output folder comes from the SIMULATIONFIGUREFOLDER environment
# variable; this raises KeyError at import time if it is unset.
figObject = ManuscriptFigures(
    "/home/aholaj/Nextcloud/figures_updated/manuscriptSimulationData_Rad.csv",
    os.environ["SIMULATIONFIGUREFOLDER"])

# 2x3 panel figure written into the helper's figure folder.
fig = Figure(figObject.figurefolder, "figure2RAD", ncols=2, nrows=3)

# Collection of simulation names to draw; its exact contents are defined
# elsewhere -- NOTE(review): verify against the class definition.
simulationList = FigureRadSimulationList()

for k in simulationList.getAllSimulations():
    try:
        figObject.simulationCollection[k].getTSDataset()
        figObject.simulationCollection[k].setTimeCoordToHours()
    except FileNotFoundError:
        if "ovchinnikov" in str(
                figObject.simulationCollection[k].getFolder()).lower():
            print(
                "Ovchinnikov data is not available. Continue with existingsimulations"
            )
            continue
        else:
Example #35
0
def combine_hog_rpn():
    """
    Load results from HOG and RPN methods and then combine them.

    For every figure listed in the test file, the label ROIs detected by
    the HOG and the RPN stage are loaded, merged (keeping only regions
    both methods agree on), and saved as the combined annotation.

    Improvement over the previous revision: the byte-for-byte duplicated
    HOG/RPN annotation-loading code is factored into ``_load_label_rois``.
    :return:
    """
    parser = OptionParser()

    parser.add_option("-p", "--path", dest="test_path", help="Path to test data.",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval.txt')
    parser.add_option("--hog_folder", dest="hog_folder",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval/rpn_hog/hog')
    parser.add_option("--rpn_folder", dest="rpn_folder",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval/rpn_hog/rpn_cpu_0.0374')
    parser.add_option("--result_folder", dest="result_folder",
                      default='/Users/jie/projects/PanelSeg/ExpPython/eval/rpn_hog/hog_rpn')

    (options, args) = parser.parse_args()

    with open(options.test_path) as f:
        lines = f.readlines()

    for idx, filepath in enumerate(lines):
        print(str(idx) + ': ' + filepath)
        filepath = filepath.strip()
        figure = Figure(filepath)
        figure.load_image()

        # Load HOG result, then RPN result (each load overwrites figure.panels).
        hog_rois, hog_scores, hog_labels = _load_label_rois(figure, options.hog_folder)
        rpn_rois, rpn_scores, rpn_labels = _load_label_rois(figure, options.rpn_folder)

        # Keep only regions agreed by both methods. IOU > 25%
        # (rpn_hog_combine_all would instead keep the union of both sets.)
        rois, scores, labels = rpn_hog_combine_agreed(hog_rois, hog_scores, hog_labels,
                                                      rpn_rois, rpn_scores, rpn_labels, 0.125)

        figure.fg_rois, figure.fg_scores, figure.fg_labels = rois, scores, labels

        # Save detection results
        figure.save_annotation(options.result_folder)


def _load_label_rois(figure, folder):
    """Load one detector's annotation of *figure* from *folder*.

    Returns ``(rois, scores, labels)`` where rois are the label rectangles
    shifted by ``Figure.PADDING``, scores are all 1.0 and labels are all
    ``Panel.LABEL_ALL``.
    """
    file_path = os.path.join(folder, figure.file).replace('.jpg', '_data.xml')
    figure.load_annotation(file_path)
    rois = np.empty([len(figure.panels), 4], dtype=int)
    for i in range(len(figure.panels)):
        rois[i] = figure.panels[i].label_rect
    # Compensate for the padding added around the image during detection.
    rois[:, 0] += Figure.PADDING
    rois[:, 1] += Figure.PADDING
    labels = np.full(rois.shape[0], Panel.LABEL_ALL)
    scores = np.full(rois.shape[0], 1.0)
    return rois, scores, labels
Example #36
0
 def __init__(self, position, white):
     """Create a piece at *position*; a truthy *white* marks a white piece.

     The base Figure initializer handles position setup; the colour flag
     is normalized to a plain bool (replaces the previous if/else branch,
     which had identical truthiness semantics).
     """
     Figure.__init__(self, position)
     self.white = bool(white)
Example #37
0
def load_samples(path):
    """Read *path* (one figure file path per line) and build Figure objects.

    Surrounding whitespace -- including the trailing newline -- is stripped
    from every entry before it is handed to the Figure constructor.
    """
    with open(path) as sample_file:
        return [Figure(entry.strip()) for entry in sample_file]