Example #1
def download_file(filename):
    tmp_filename = os.path.join(const.TMP_PATH, filename)
    utils.make_dir(os.path.dirname(tmp_filename))
    logger.debug('Trying to download %s to %s from bucket %s', filename, tmp_filename, S3_BUCKET)
    s3.download_file(S3_BUCKET, filename, tmp_filename,
                     Callback=ProgressPercentage(tmp_filename))
    logger.debug('%s downloaded', filename)
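
Note: every example on this page calls a project-specific make_dir helper that is not shown. A minimal sketch, assuming it simply wraps os.makedirs with exist-ok semantics (a two-argument variant that joins and returns a path appears in Example #12; see the sketch there):

import os

def make_dir(path):
    # Hedged sketch of the helper these examples assume: create the
    # directory (and any parents) if it does not already exist.
    os.makedirs(path, exist_ok=True)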
Example #2
    def generate_annotations_file(self, root_dir, frames_dir, annotations_dir):
        coded_ann_in_dirs = os.listdir(frames_dir)
        print(annotations_dir)

        for d in coded_ann_in_dirs:
            folder_dir = os.path.join(frames_dir, d)
            name_dir = os.path.join(annotations_dir, d)
            make_dir(name_dir)

            dict_seq = {}
            text_name = os.path.join(name_dir, "dictionary.txt")
            text_file = open(text_name, "w")

            #obj_ids = []
            print(name_dir)

            k = 1

            files_folder = sorted(os.listdir(folder_dir))

            for f in files_folder:
                print("File folder: ", f)
                if f.endswith(self.ext):

                    image_file = os.path.join(folder_dir, f)
                    img = np.array(Image.open(image_file))
                    #obj_ids = np.append(obj_ids, np.unique(img))
                    axis_x = img.shape[0]
                    axis_y = img.shape[1]

                    for x in range(axis_x):
                        for y in range(axis_y):
                            # 0 and 10000 are both treated as background
                            if img[x, y] == 0 or img[x, y] == 10000:
                                img[x, y] = 0
                            elif str(img[x, y]) in dict_seq:
                                # reuse the id already assigned to this object value
                                img[x, y] = dict_seq[str(img[x, y])]
                            else:
                                # give a new object value the next sequential id
                                dict_seq[str(img[x, y])] = k
                                img[x, y] = k
                                k += 1
                    new_img = Image.fromarray(img)
                    name_file = os.path.join(name_dir, f)
                    new_img.save(name_file)

            inverse_dict = {}
            for key in dict_seq:  # renamed from k to avoid shadowing the counter above
                inverse_dict[str(dict_seq[key])] = int(key)

            text_file.write(json.dumps(inverse_dict))
            text_file.close()
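
The dictionary.txt written above maps each new sequential id (as a string key) back to the original pixel value. A small companion sketch for reading it back, assuming the same os/json imports as the writer; load_annotation_dict is a hypothetical name:

def load_annotation_dict(name_dir):
    # Hypothetical reader for the dictionary.txt written above.
    with open(os.path.join(name_dir, "dictionary.txt")) as f:
        return json.load(f)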
Example #3
def evaluate_all(gt_file_dir, gt_img_dir, ckpt_path, gpuid='0'):
    db = DB(ckpt_path, gpuid)

    img_list = os.listdir(gt_img_dir)

    show = './eva'
    make_dir(show)

    total_TP = 0
    total_gt_care_num = 0
    total_pred_care_num = 0
    for img_name in tqdm.tqdm(img_list):
        img = cv2.imread(os.path.join(gt_img_dir, img_name))

        pred_box_list, pred_score_list, _ = db.detect_img(os.path.join(
            gt_img_dir, img_name),
                                                          ispoly=True,
                                                          show_res=False)

        gt_file_name = os.path.splitext(img_name)[0] + '.txt'

        gt_boxes, tags = load_ctw1500_labels(
            os.path.join(gt_file_dir, gt_file_name))

        gt_care_list = []
        gt_dontcare_list = []

        for i, box in enumerate(gt_boxes):
            box = box.reshape((-1, 2)).tolist()
            if not tags[i]:
                gt_care_list.append(box)
            else:
                gt_dontcare_list.append(box)

        precision, recall, f1_score, TP, gt_care_num, pred_care_num, pairs_list = evaluate(
            gt_care_list, gt_dontcare_list, pred_box_list, overlap=0.5)

        for pair in pairs_list:
            cv2.polylines(img,
                          [np.array(pair['gt'], np.int32).reshape([-1, 1, 2])],
                          True, (0, 255, 0))
            cv2.polylines(img,
                          [np.array(pair['pred'], np.int32).reshape([-1, 1, 2])],
                          True, (255, 0, 0))

        cv2.imwrite(os.path.join(show, img_name), img)

        total_TP += TP
        total_gt_care_num += gt_care_num
        total_pred_care_num += pred_care_num

    total_precision = float(total_TP) / total_pred_care_num
    total_recall = float(total_TP) / total_gt_care_num
    total_f1_score = compute_f1_score(total_precision, total_recall)

    return total_precision, total_recall, total_f1_score
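
compute_f1_score is not shown in this snippet; a minimal sketch assuming the standard harmonic mean (the zero guard is an addition):

def compute_f1_score(precision, recall):
    # Hypothetical helper: F1 as the harmonic mean of precision and
    # recall, guarding against division by zero.
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)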
Example #4
def crop_videos(dataset_json, input_dir, output_dir, dataset="kinetics_train"):
    # video_files = os.path.join(directory_json, directory_json_files)
    json_file = os.path.join(dataset_json, dataset + ".json")
    with open(json_file) as f:
        data = json.load(f)
    video_file_dir = os.path.join(input_dir, dataset, "*.mp4")
    video_files = glob.glob(video_file_dir)
    print(video_file_dir)
    for video in video_files:
        print("Reading ", video)
        video_key = os.path.basename(video).split(".")[0]
        segment = data[video_key]["annotations"]["segment"]
        # print(segment)
        cap = cv2.VideoCapture(video)
        # print(cap.get(cv2.CAP_PROP_FPS))

        video_fps = cap.get(cv2.CAP_PROP_FPS)
        video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        time = int((segment[1] - segment[0]) * video_fps)

        frames = np.empty((time, video_height, video_width, 3), dtype=np.uint8)  # match cv2's uint8 frames

        frame_count = -1
        j = 0
        while cap.isOpened():
            ret, frame = cap.read()
            # print(video)
            if not ret:
                break

            frame_count = frame_count + 1

            current_time = frame_count / video_fps

            if current_time < segment[0]:
                # j = 0
                # print("Current time ", current_time)
                continue
            else:
                if j == time:
                    print("Breaking")
                    break
                # cv2.imshow('frame1', frame)
                # cv2.waitKey(25)
                frames[j] = frame
                j = j + 1
        cap.release()

        save_dir = os.path.join(output_dir, dataset)
        make_dir(save_dir)
        output_file = os.path.join(output_dir, dataset, video_key + ".avi")
        write_to_video(frames, output_file, video_height, video_width,
                       video_fps)
Example #5
    def __init__(self, model):  # do not modify this section
        self.config = config()  # fetches the configuration from the json file
        make_dir()  # creates the Experiment directory
        self.logs = Log()  # creates the log file
        self.summary = None  # initializes summary variable
        params()  # pushes parameter values into the log file
        self.model = model  # initializes the model passed in
        self.data = SplitProcess()  # creates object of SplitProcess class
        self.data.split()  # splits the data
        self.data.process_data()  # processes the data
Example #6
    def save(self, size):
        '''
        Saves replay memory (attributes and arrays).
        '''
        # Create out dir
        utils.make_dir(self.data_dir)
        print('Saving Memory data into Replay Memory Instance...')
        # Save property dict
        with open('{}/properties.json'.format(self.data_dir), 'w') as f:
            json.dump(self.to_dict(size), f)
        # Save arrays
        self._save_arrays(self.data_dir, size)
Example #7
    def save_train_data(self,
                        model_name,
                        log_path,
                        config_path,
                        dataset,
                        save_graph=False):
        make_dir(os.path.join(log_path, model_name))

        torch.save(self.net, os.path.join(log_path, model_name, model_name))
        copyfile(config_path,
                 os.path.join(log_path, model_name,
                              '{}.log'.format(model_name)))
        with open(os.path.join(log_path, model_name, 'dataset'), 'w') as file:
            file.write(json.dumps(dataset))
Example #8
def multi_thread_scrapping(output_dir, catalog, product, num_thread=20):
    make_dir(output_dir)
    exist_files = get_exist_files(output_dir)
    cad_infos = extract_data(catalog, product['partID'])
    unfiltered = pd.DataFrame(cad_infos)
    args = []
    for idx, cad_info in unfiltered.iterrows():
        if cad_info['partNumber'] not in exist_files:
            args.append({'catalog': catalog, 'output_dir': output_dir, 'cad_info': cad_info})

    pool = ThreadPool(num_thread)
    pool.imap_unordered(download_thread, args)
    pool.close()
    pool.join()
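
get_exist_files and download_thread are project helpers not shown here. A plausible sketch of the former, assuming downloaded files are named after their part numbers:

import os

def get_exist_files(output_dir):
    # Hypothetical helper: part numbers already downloaded, taken from
    # the filenames (extensions stripped) in output_dir.
    return {os.path.splitext(f)[0] for f in os.listdir(output_dir)}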
Example #9
def run(keyword, title_matching=False):
    per_search = 100
    init_results = search(keyword, per_search, offset=0)
    total = init_results['total']
    total_search = total // per_search
    insert_search_log(keyword, total)
    output_dir = f'{dw_path}/{keyword}'
    make_dir(output_dir)
    keyword_id = get_keyword_id(keyword)
    print(f'{total} models found')

    for i in range(total_search + 1):
        results = search(keyword, per_search, offset=i * per_search)
        for item in tqdm(results['entries']):
            try:
                model_id = item['id']  # renamed from id to avoid shadowing the builtin
                name = filter_escape_char(item['title'])

                if is_model(model_id):
                    continue

                if title_matching and keyword not in item['title'].lower():
                    continue

                zip_file = download(output_dir, item)
                if not zip_file:
                    continue

                unzipped_dir = unzip_file(zip_file)
                files = filter_files(unzipped_dir)
                for file in files:
                    moved_file = move_file(join(unzipped_dir, file),
                                           output_dir)
                    obj_file = convert_to_obj(moved_file)

                    # if 'bot_smontage' in item['binaryNames']:
                    #     image = item['binaries']['bot_smontage']['contentUrl']
                    # else:
                    image = item['binaries']['bot_lt']['contentUrl']

                    insert_dw_file(model_id, name, image, obj_file, keyword_id)

                shutil.rmtree(unzipped_dir)

            except Exception as e:
                logging.error(f'[{keyword}]:{e}')

    clean_dir(output_dir)
    create_image(output_dir)
Example #10
    def create_figures(self):

        acc_samples = 0
        results_root_dir = os.path.join('../models', args.model_name,
                                        args.model_name + '_results')
        make_dir(results_root_dir)
        results_dir = os.path.join(results_root_dir, 'A1')
        make_dir(results_dir)
        print "Creating annotations for leaves validation..."
        for batch_idx, (inputs, targets) in enumerate(self.loader):
            x, y_mask, y_class, sw_mask, sw_class = batch_to_var(
                self.args, inputs, targets)
            out_masks, _, stop_probs = test(self.args, self.encoder,
                                            self.decoder, x)

            for sample in range(self.batch_size):
                sample_idx = self.sample_list[sample + acc_samples]
                image_dir = sample_idx.split('.')[0] + '.png'
                im = scipy.misc.imread(image_dir)
                h = im.shape[0]
                w = im.shape[1]

                mask_sample = np.zeros([h, w])
                sample_idx = sample_idx.split('/')[-1].split('.')[0]
                img_masks = out_masks[sample]

                instance_id = 0
                class_scores = stop_probs[sample]

                for time_step in range(self.T):
                    mask = img_masks[time_step].cpu().numpy()
                    mask = scipy.misc.imresize(mask, [h, w])

                    class_scores_mask = class_scores[time_step].cpu().numpy()
                    class_score = class_scores_mask[0]
                    if class_score > args.class_th:
                        mask_sample[mask > args.mask_th * 255] = time_step
                        instance_id += 1

                file_name = os.path.join(results_dir, sample_idx + '.png')
                file_name_prediction = file_name.replace(
                    'rgb.png', 'label.png')

                im = Image.fromarray(mask_sample).convert('L')
                im.save(file_name_prediction)

            acc_samples += self.batch_size
Example #11
    def load(self):
        '''
        Loads replay memory (attributes and arrays) into self, if possible.
        '''
        # Create out dir
        utils.make_dir(self.data_dir)
        try:
            print('Loading Memory data into Replay Memory Instance...')
            # Load property dict
            with open('{}/properties.json'.format(self.data_dir)) as f:
                d = json.load(f)
            # Load numpy arrays
            self._load_arrays(self.data_dir, d['saved_size'])

            print('Finished loading Memory data into Replay Memory Instance!')

        except IOError as e:
            self.__init__(self.memory_size,
                          data_dir=self.data_dir, load_existing=False)
            print("I/O error({0}): {1}".format(e.errno, e.strerror))
            print("Couldn't find initial values for Replay Memory, instance init as new.")
Example #12
    def prepare_data(
            self
    ) -> Tuple[str, str, str]:
        temporary_dir = self.new_path_provider.provide()
        logging.info('Created temporary directory: %s', temporary_dir)
        training_dir = make_dir(temporary_dir, self.TRAINING_DIR)
        test_dir = make_dir(temporary_dir, self.TEST_DIR)

        examples_by_category = get_examples_by_category(self.data_path_provider.provide())
        test_size = math.ceil(
            min([len(examples) for examples in examples_by_category.values()]) * self.configuration.test_data)
        for category in examples_by_category:
            category_examples = examples_by_category[category].copy()
            random.shuffle(category_examples)

            test_examples = category_examples[:test_size]
            category_test_dir = make_dir(test_dir, category.name)
            copy_files_to_dir(test_examples, category_test_dir)

            training_examples = category_examples[test_size:]
            category_training_dir = make_dir(training_dir, category.name)
            copy_files_to_dir(training_examples, category_training_dir)

        return training_dir, test_dir, temporary_dir
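
This example relies on a two-argument make_dir variant that joins a parent directory and a name, creates the result, and returns it. A minimal sketch under that assumption:

import os

def make_dir(parent, name):
    # Variant assumed by prepare_data: join, create, and return the path.
    path = os.path.join(parent, name)
    os.makedirs(path, exist_ok=True)
    return path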
Example #13
def init():
    make_dir(output_dir)
    make_dir(log_dir)
    make_dir(val_result_dir)
    make_dir(val_result_pr_dir)
Example #14
import torch
from torch.utils.data import DataLoader

from torch.utils.tensorboard import SummaryWriter
from utils.losses import maskedMSE
from utils.utils import Settings, make_dir
from utils.utils import get_multi_object_dataset, get_multi_object_net

args = Settings()

make_dir(args.log_path + args.model_type + '/')
make_dir(args.models_path + args.model_type + '/')
logger = SummaryWriter(args.log_path + args.name)

# logger.add_hparams(args.get_dict(), {})

trSet, valSet = get_multi_object_dataset()

net = get_multi_object_net()

if args.optimizer == 'Ranger':
    # Ranger is assumed to come from a third-party package (not imported above)
    optimizer = Ranger(net.parameters(),
                       lr=args.lr,
                       alpha=0.5,
                       k=5,
                       weight_decay=1e-3)
elif args.optimizer == 'Adam':
    optimizer = torch.optim.Adam(net.parameters(),
                                 lr=args.lr,
                                 weight_decay=1e-3)
else:
    # fallback assumed to mirror Example #25: plain SGD
    optimizer = torch.optim.SGD(net.parameters(),
                                lr=args.lr,
                                weight_decay=1e-3)
Example #15
import os

import cv2
import torch
from tqdm import tqdm

from opts import Opts
from utils.utils import test_time_aug, make_dir, img_predict


def predict():
    with tqdm(total=args.n_test, desc='Predict', unit='img') as p_bar:
        for index, i in enumerate(os.listdir(args.dir_test)):
            save_path = os.path.join(args.dir_result, i)
            image = cv2.imread(os.path.join(args.dir_test, i))
            img_predict(args, image, save_path=save_path)
            p_bar.update(1)


if __name__ == '__main__':
    args = Opts().init()
    args.dir_test = os.path.join(args.dir_data, 'test')
    args.n_test = len(os.listdir(args.dir_test))
    args.net.load_state_dict(
        torch.load(
            os.path.join(args.dir_log, f'{args.dataset}_{args.arch}_{args.exp_id}.pth'), map_location=args.device
        )
    )
    if args.tta:
        args.net = test_time_aug(args.net, merge_mode='mean')
    make_dir(dir_path=args.dir_result)
    predict()
Example #16
def train_loop(args,
               encoder,
               decoder,
               train_loader,
               valid_loader,
               optimizer_encoder,
               optimizer_decoder,
               outpath,
               device=None):
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    assert (args.save_dir
            is not None), "Please specify directory of saving the models!"
    make_dir(args.save_dir)

    train_avg_losses = []
    train_dts = []
    valid_avg_losses = []
    valid_dts = []

    for ep in range(args.num_epochs):
        if args.load_toTrain:
            epoch = args.load_epoch + ep + 1
        else:
            epoch = ep

        # Training
        start = time.time()
        train_avg_loss, train_gen_imgs = train(args,
                                               encoder,
                                               decoder,
                                               train_loader,
                                               epoch,
                                               optimizer_encoder,
                                               optimizer_decoder,
                                               outpath,
                                               is_train=True,
                                               device=device)
        train_dt = time.time() - start

        train_avg_losses.append(train_avg_loss)
        train_dts.append(train_dt)

        save_data(data=train_avg_loss,
                  data_name="loss",
                  epoch=epoch,
                  outpath=outpath,
                  is_train=True)
        save_data(data=train_dt,
                  data_name="dt",
                  epoch=epoch,
                  outpath=outpath,
                  is_train=True)

        # Validation
        start = time.time()
        valid_avg_loss, valid_gen_imgs = test(args,
                                              encoder,
                                              decoder,
                                              valid_loader,
                                              epoch,
                                              optimizer_encoder,
                                              optimizer_decoder,
                                              outpath,
                                              device=device)
        valid_dt = time.time() - start

        valid_avg_losses.append(valid_avg_loss)
        valid_dts.append(valid_dt)

        save_data(data=valid_avg_loss,
                  data_name="loss",
                  epoch=epoch,
                  outpath=outpath,
                  is_train=False)
        save_data(data=valid_dt,
                  data_name="dt",
                  epoch=epoch,
                  outpath=outpath,
                  is_train=False)

        print(
            f'epoch={epoch+1}/{args.num_epochs if not args.load_toTrain else args.num_epochs+args.load_epoch}, '
            +
            f'train_loss={train_avg_loss}, valid_loss={valid_avg_loss}, dt={train_dt+valid_dt}'
        )

        if (epoch > 0) and ((epoch + 1) % 10 == 0):
            plot_eval_results(args, (train_avg_losses, valid_avg_losses),
                              f"losses to {epoch+1}",
                              outpath,
                              global_data=False)

    # Save global data
    save_data(data=train_avg_losses,
              data_name="losses",
              epoch="global",
              outpath=outpath,
              is_train=True,
              global_data=True)
    save_data(data=train_dts,
              data_name="dts",
              epoch="global",
              outpath=outpath,
              is_train=True,
              global_data=True)
    save_data(data=valid_avg_losses,
              data_name="losses",
              epoch="global",
              outpath=outpath,
              is_train=False,
              global_data=True)
    save_data(data=valid_dts,
              data_name="dts",
              epoch="global",
              outpath=outpath,
              is_train=False,
              global_data=True)

    return train_avg_losses, valid_avg_losses, train_dts, valid_dts
Example #17
def train(args, encoder, decoder, loader, epoch, optimizer_encoder,
          optimizer_decoder, outpath, is_train, device):
    epoch_total_loss = 0
    labels = []
    gen_imgs = []
    if args.compareFigs:
        original = []

    if is_train:
        encoder.train()
        decoder.train()
    # else:
    #     encoder.eval()
    #     decoder.eval()

    for i, batch in enumerate(loader, 0):
        X, Y = batch[0].to(device), batch[1]
        batch_gen_imgs = decoder(encoder(X), args)

        loss = ChamferLoss(device)  # note: re-instantiated every batch; could be hoisted above the loop
        batch_loss = loss(batch_gen_imgs, X)
        epoch_total_loss += batch_loss.item()

        # True if batch_loss has at least one NaN value
        if (batch_loss != batch_loss).any():
            raise RuntimeError('Batch loss is NaN!')

        # back prop
        if is_train:
            optimizer_encoder.zero_grad()
            optimizer_decoder.zero_grad()
            batch_loss.backward()
            optimizer_encoder.step()
            optimizer_decoder.step()
        #     print(f"epoch {epoch+1}, batch {i+1}/{len(loader)}, train_loss={batch_loss.item()}", end='\r', flush=True)
        # else:
        #     print(f"epoch {epoch+1}, batch {i+1}/{len(loader)}, valid_loss={batch_loss.item()}", end='\r', flush=True)

        # Save all generated images
        if args.save_figs and args.save_allFigs:
            labels.append(Y.cpu())
            gen_imgs.append(torch.tanh(batch_gen_imgs).cpu())
            if args.compareFigs:
                original.append(X.cpu())

        # Save only the last batch
        elif args.save_figs:
            if (i == len(loader) - 1):
                labels.append(Y.cpu())
                gen_imgs.append(torch.tanh(batch_gen_imgs).cpu())
                if args.compareFigs:
                    original.append(X.cpu())

    # Save model
    if is_train:
        make_dir(f'{outpath}/weights_encoder')
        make_dir(f'{outpath}/weights_decoder')
        torch.save(
            encoder.state_dict(),
            f"{outpath}/weights_encoder/epoch_{epoch+1}_encoder_weights.pth")
        torch.save(
            decoder.state_dict(),
            f"{outpath}/weights_decoder/epoch_{epoch+1}_decoder_weights.pth")

    # Compute average loss
    epoch_avg_loss = epoch_total_loss / len(loader)
    save_data(epoch_avg_loss, "loss", epoch, is_train, outpath)

    for i in range(len(gen_imgs)):
        if args.compareFigs:
            save_gen_imgs(gen_imgs[i],
                          labels[i],
                          epoch,
                          is_train,
                          outpath,
                          originals=original[i].cpu())
        else:
            save_gen_imgs(gen_imgs[i], labels[i], epoch, is_train, outpath)

    return epoch_avg_loss, gen_imgs
Example #18
def save_seq_results(model_name, seq_name, prev_mask):

    results_dir = os.path.join('../models', model_name,
                               'masks_sep_2assess_val_davis', seq_name)
    submission_dir = os.path.join('../models', model_name, 'Annotations-davis',
                                  seq_name)
    lmdb_env_seq = lmdb.open(osp.join(cfg.PATH.DATA, 'lmdb_seq'))
    make_dir(submission_dir)
    prev_assignment = []
    images_dir = os.path.join('../../databases/DAVIS2017/JPEGImages/480p/',
                              seq_name)
    image_names = os.listdir(images_dir)
    image_names.sort()
    starting_img_name = image_names[0]
    starting_frame = int(starting_img_name[:-4])
    key_db = osp.basename(seq_name)
    with lmdb_env_seq.begin() as txn:
        _files_vec = txn.get(key_db.encode()).decode().split('|')
        _files = [osp.splitext(f)[0] for f in _files_vec]

    frame_names = _files

    frame_names.sort()
    frame_idx = 0
    obj_ids_sorted_increasing_jaccard = []

    for frame_name in frame_names:

        if frame_idx == 0:

            annotation = np.array(
                Image.open('../../databases/DAVIS2017/Annotations/480p/' +
                           seq_name + '/' + frame_name + '.png'))
            instance_ids = sorted(np.unique(annotation))
            instance_ids = instance_ids if instance_ids[0] else instance_ids[1:]
            if len(instance_ids) > 0:
                instance_ids = instance_ids[:-1] if instance_ids[
                    -1] == 255 else instance_ids

            res_im = Image.fromarray(annotation, mode="P")
            res_im.putpalette(PALETTE)
            res_im.save(submission_dir + '/' + frame_name + '.png')

            #compute assignment between predictions from first frame and ground truth from first frame
            if prev_mask:
                num_preds = len(instance_ids)
            else:
                num_preds = 10
            cost = np.ones((len(instance_ids), num_preds))
            for obj_id in instance_ids:
                annotation_obj = np.zeros(annotation.shape)
                annotation_obj[annotation == obj_id] = 1
                for pred_id in range(num_preds):
                    if prev_mask:
                        pred_mask = imread(results_dir + '/' + frame_name +
                                           '_instance_%02d.png' % pred_id)
                    else:
                        pred_mask = imread(
                            results_dir + '/' + '%05d_instance_%02d.png' %
                            (starting_frame + frame_idx, pred_id))
                    pred_mask_resized = imresize(pred_mask,
                                                 annotation.shape,
                                                 interp='nearest')
                    cost[obj_id - 1, pred_id] = 1 - jaccard_simple(
                        annotation_obj, pred_mask_resized)

            row_ind, col_ind = linear_sum_assignment(cost)

            prev_assignment = col_ind

            cost_objs = {}
            for obj_id in instance_ids:
                cost_objs[obj_id] = cost[obj_id - 1,
                                         prev_assignment[obj_id - 1]]
            obj_ids_sorted_increasing_jaccard = sorted(cost_objs.items(),
                                                       key=lambda kv: kv[1],
                                                       reverse=True)

        else:

            pred_mask_resized = np.zeros(annotation.shape, dtype=np.uint8)

            for obj_id, jaccard_val in obj_ids_sorted_increasing_jaccard:
                instance_assigned_id = prev_assignment[obj_id - 1]
                if prev_mask:
                    pred_mask = imread(results_dir + '/' + frame_name +
                                       '_instance_%02d.png' %
                                       instance_assigned_id)
                else:
                    pred_mask = imread(
                        results_dir + '/' + '%05d_instance_%02d.png' %
                        (starting_frame + frame_idx, instance_assigned_id))

                pred_mask_resized_aux = imresize(pred_mask,
                                                 annotation.shape,
                                                 interp='nearest')
                pred_mask_resized[pred_mask_resized_aux == 255] = obj_id

            res_im = Image.fromarray(pred_mask_resized, mode="P")
            res_im.putpalette(PALETTE)
            res_im.save(submission_dir + '/' + frame_name + '.png')

        frame_idx = frame_idx + 1
Example #19
def create_res_catalogue(outcat_prefix, RESULTS_CAT, global_parameters):
    outcat = os.path.join(
        RESULTS_CAT, outcat_prefix +
        '_'.join(global_parameters).replace('/', '').replace('.', ''))
    make_dir(outcat)
    return outcat
Example #20
def main():
    # Config
    stock_tickers = ["AAPL", "BA", "TSLA"]
    n_stock = len(stock_tickers)
    n_forecast = 0
    n_sentiment = 0
    models_folder = 'saved_models'
    rewards_folder = 'saved_rewards'
    rl_folder = 'saved_models/rl'
    rl_rewards = 'saved_rewards/rl'
    lstm_folder = 'saved_models/lstm'
    news_folder = './data/news'
    forecast_window = 10
    num_episodes = 300
    batch_size = 16
    initial_investment = 10000

    # Parser arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f',
        '--forecast',
        type=str,
        default=None,
        help='Enable stock forecasting. Select "one" or "multi"')
    parser.add_argument(
        '-s',
        '--sentiment',
        action='store_true',  # type=bool is an argparse pitfall: bool("False") is True
        help='Enable sentiment analysis')
    parser.add_argument('-a',
                        '--agent',
                        type=str,
                        default="DQN",
                        help='Select "DQN" or "DDPG"')
    args = parser.parse_args()

    make_dir(models_folder)
    make_dir(rewards_folder)
    make_dir(rl_folder)
    make_dir(rl_rewards)
    make_dir(lstm_folder)

    # Get data
    data = get_data("./data", stock_tickers)
    print()

    # Generate state (features) based on arguments (forecast & sentiment)
    if args.forecast is None and not args.sentiment:
        data = data.drop(
            columns="timestamp").iloc[forecast_window:].reset_index(
                drop=True).values

    elif args.forecast is not None and not args.sentiment:
        concat_data = data.iloc[forecast_window:].reset_index(drop=True)
        for ticker in stock_tickers:
            print(f"Performing {ticker} {args.forecast.title()}step Forecast")
            predictions = {}
            predictions[f'{ticker}_Forecast'] = lstm_forecast(
                models_folder, ticker, data, forecast_window,
                args.forecast.lower())

            # predictions[f'{ticker}_Forecast'] = pd.DataFrame(predictions)
            # predictions[f'{ticker}_Forecast'].index = pd.RangeIndex(forecast_window, forecast_window + len(predictions[f'{ticker}_Forecast']))

            concat_data = pd.concat(
                [concat_data, pd.DataFrame(predictions)], join="outer", axis=1)

        print(f"{args.forecast.title()}step Forecasts Added!\n")
        data = concat_data.drop(columns="timestamp").values
        n_forecast = len(stock_tickers)

    elif args.forecast is not None and args.sentiment:
        concat_data = data.iloc[forecast_window:].reset_index(drop=True)
        for ticker in stock_tickers:
            print(f"Performing {ticker} {args.forecast.title()}step Forecast")
            predictions = {}
            predictions[f'{ticker}_Forecast'] = lstm_forecast(
                models_folder, ticker, data, forecast_window,
                args.forecast.lower())

            # predictions[f'{ticker}_Forecast'] = pd.DataFrame(predictions)
            # predictions[f'{ticker}_Forecast'].index = pd.RangeIndex(forecast_window, forecast_window + len(predictions[f'{ticker}_Forecast']))

            concat_data = pd.concat(
                [concat_data, pd.DataFrame(predictions)], join="outer", axis=1)

        print(f"{args.forecast.title()}step Forecasts Added!\n")

        for ticker in stock_tickers:
            print(f"Analyzing {ticker} Stock Sentiment")
            sentiment_df = sentiment_analysis(news_folder, ticker)

            concat_data = pd.merge(concat_data,
                                   sentiment_df,
                                   left_on="timestamp",
                                   right_on="publishedAt").drop(
                                       columns="publishedAt", axis=1)

        print("Sentiment Features Added!\n")
        print(concat_data)
        data = concat_data.drop(columns="timestamp").values
        n_forecast = len(stock_tickers)
        n_sentiment = len(stock_tickers)

    elif args.sentiment:
        concat_data = data
        for ticker in stock_tickers:
            print(f"Analyzing {ticker} Stock Sentiment")
            sentiment_df = sentiment_analysis(news_folder, ticker)
            # print(sentiment_df)
            concat_data = pd.merge(concat_data,
                                   sentiment_df,
                                   left_on="timestamp",
                                   right_on="publishedAt").drop(
                                       columns="publishedAt", axis=1)

        print("Sentiment Features Added!\n")
        data = concat_data.drop(columns="timestamp").values
        n_sentiment = len(stock_tickers)

    n_timesteps, _ = data.shape

    n_train = n_timesteps  # the full series is used for training, so test_data below is empty
    train_data = data[:n_train]
    test_data = data[n_train:]

    # Initialize the MultiStock Environment
    env = MultiStockEnv(train_data, n_stock, n_forecast, n_sentiment,
                        initial_investment, "DQN")
    state_size = env.state_dim
    action_size = len(env.action_space)
    agent = DQNAgent(state_size, action_size)
    scaler = get_scaler(env)

    # Store the final value of the portfolio (end of episode)
    portfolio_value = []

    ######### DDPG #########
    # Run with DDPG Agent
    if args.agent.lower() == "ddpg":
        env = MultiStockEnv(train_data, n_stock, n_forecast, n_sentiment,
                            initial_investment, "DDPG")
        DDPGAgent(env, num_episodes)
        exit()
    ######### /DDPG #########

    ######### DQN #########
    # Run with DQN
    # play the game num_episodes times
    print("\nRunning DQN Agent...\n")
    for e in range(num_episodes):
        val = play_one_episode(agent, env, scaler, batch_size)
        print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}")
        portfolio_value.append(val)  # append episode end portfolio value

    # save the weights when we are done
    # save the DQN
    agent.save(f'{models_folder}/rl/dqn.h5')

    # save the scaler
    with open(f'{models_folder}/rl/scaler.pkl', 'wb') as f:
        pickle.dump(scaler, f)

    # save portfolio value for each episode
    np.save(f'{rewards_folder}/rl/dqn.npy', portfolio_value)

    print("\nDQN Agent run complete and saved!")

    a = np.load('./saved_rewards/rl/dqn.npy')

    print(
        f"\nCumulative Portfolio Value Average: {a.mean():.2f}, Min: {a.min():.2f}, Max: {a.max():.2f}"
    )
    plt.plot(a)
    plt.title(f"Portfolio Value Per Episode ({args.agent.upper()})")
    plt.ylabel("Portfolio Value")
    plt.xlabel("Episodes")
    plt.show()
Example #21
if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Plot visual results.')
    parser.add_argument('-model_name', dest='model_name', default='model')
    parser.add_argument('--prev_mask', dest='prev_mask', action='store_true')
    parser.set_defaults(prev_mask=False)
    args = parser.parse_args()

    with open('./dataloader/db_info.yaml', 'r') as f:
        sequences = edict(yaml.load(f, Loader=yaml.FullLoader)).sequences

    sequences = filter(lambda s: s.set == 'test-dev', sequences)

    submission_base_dir = os.path.join('../models', args.model_name,
                                       'Annotations-davis')
    make_dir(submission_base_dir)

    for seq_name in sequences:
        save_seq_results(args.model_name, seq_name.name, args.prev_mask)
Example #22
import logging
import argparse

from datetime import datetime
from database import agent
from scrapper.grab_cad import run as grabCAD_run
from scrapper.dw import run as DW_run
from scrapper.trace_parts import run as traceParts_run
from utils.utils import make_dir
from tools import get_keywords

make_dir('log')
logging.basicConfig(filename=f'log/{datetime.now().strftime("%y%m%d_%H%M%S")}.log', level=logging.ERROR)

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-k', '--keywords', default=None, type=str)
args = parser.parse_args()


def search(keyword, websites=None):
    if websites is None:
        websites = ['grabcad', '3dw']

    keyword = keyword.lower()

    logging.info(f'KEYWORD: {keyword}')

    if keyword not in get_keywords():
        agent.insert('keyword', ignore=True, **{'name': keyword})

    if 'grabcad' in websites:
        # assumed to dispatch to the GrabCAD scraper imported above
        grabCAD_run(keyword)
Example #23
def trainIters(args):
    epoch_resume = 0
    model_dir = os.path.join('../models/',
                             args.model_name + '_prev_inference_mask')

    if args.resume:
        # will resume training the model with name args.model_name
        encoder_dict, decoder_dict, enc_opt_dict, dec_opt_dict, load_args = load_checkpoint(
            args.model_name, args.use_gpu)

        epoch_resume = load_args.epoch_resume
        encoder = FeatureExtractor(load_args)
        decoder = RSISMask(load_args)
        encoder_dict, decoder_dict = check_parallel(encoder_dict, decoder_dict)
        encoder.load_state_dict(encoder_dict)
        decoder.load_state_dict(decoder_dict)

        args = load_args

    elif args.transfer:
        # load model from args and replace last fc layer
        encoder_dict, decoder_dict, _, _, load_args = load_checkpoint(
            args.transfer_from, args.use_gpu)
        encoder = FeatureExtractor(load_args)
        decoder = RSISMask(args)
        encoder_dict, decoder_dict = check_parallel(encoder_dict, decoder_dict)
        encoder.load_state_dict(encoder_dict)
        decoder.load_state_dict(decoder_dict)

    else:
        encoder = FeatureExtractor(args)
        decoder = RSISMask(args)

    # model checkpoints will be saved here
    make_dir(model_dir)

    # save parameters for future use
    pickle.dump(args, open(os.path.join(model_dir, 'args.pkl'), 'wb'))

    encoder_params = get_base_params(args, encoder)
    skip_params = get_skip_params(encoder)
    decoder_params = list(decoder.parameters()) + list(skip_params)
    dec_opt = get_optimizer(args.optim, args.lr, decoder_params,
                            args.weight_decay)
    enc_opt = get_optimizer(args.optim_cnn, args.lr_cnn, encoder_params,
                            args.weight_decay_cnn)

    if args.resume:
        enc_opt.load_state_dict(enc_opt_dict)
        dec_opt.load_state_dict(dec_opt_dict)
        from collections import defaultdict
        dec_opt.state = defaultdict(dict, dec_opt.state)

    if not args.log_term:
        print("Training logs will be saved to:",
              os.path.join(model_dir, 'train.log'))
        sys.stdout = open(os.path.join(model_dir, 'train.log'), 'w')
        sys.stderr = open(os.path.join(model_dir, 'train.err'), 'w')

    print(args)

    # objective function for mask
    mask_siou = softIoULoss()

    if args.use_gpu:
        encoder.cuda()
        decoder.cuda()
        mask_siou.cuda()

    crits = mask_siou
    optims = [enc_opt, dec_opt]
    if args.use_gpu:
        torch.cuda.synchronize()
    start = time.time()

    # vars for early stopping
    best_val_loss = args.best_val_loss
    acc_patience = 0
    mt_val = -1

    # keep track of the number of batches in each epoch for continuity when plotting curves
    loaders = init_dataloaders(args)
    num_batches = {'train': 0, 'val': 0}
    #area_range = [[0 ** 2, 1e5 ** 2], [0 ** 2, 20 ** 2], [20 ** 2, 59 ** 2], [59 ** 2, 1e5 ** 2]]
    area_range = [[0**2, 1e5**2], [0**2, 30**2], [30**2, 90**2],
                  [90**2, 1e5**2]]  #for (287,950))
    resolution = 0

    for e in range(args.max_epoch):
        print("Epoch", e + epoch_resume)
        # store losses in lists to display average since beginning
        epoch_losses = {
            'train': {
                'total': [],
                'iou': []
            },
            'val': {
                'total': [],
                'iou': []
            }
        }
        # total mean for epoch will be saved here to display at the end
        total_losses = {'total': [], 'iou': []}

        # check if it's time to do some changes here
        if e + epoch_resume >= args.finetune_after and not args.update_encoder and args.finetune_after != -1:
            print("Starting to update encoder")
            args.update_encoder = True
            acc_patience = 0
            mt_val = -1

        if args.loss_penalization:
            if e < 10:
                resolution = area_range[2]
            else:
                resolution = area_range[0]

        # we validate after each epoch
        for split in ['train', 'val']:
            if args.dataset == 'davis2017' or args.dataset == 'youtube' or args.dataset == 'kittimots':
                loaders[split].dataset.set_epoch(e)
                for batch_idx, (inputs, targets, seq_name,
                                starting_frame) in enumerate(loaders[split]):
                    # send batch to GPU

                    prev_hidden_temporal_list = None
                    loss = None
                    last_frame = False
                    max_ii = min(len(inputs), args.length_clip)

                    for ii in range(max_ii):
                        # If are on the last frame from a clip, we will have to backpropagate the loss back to the beginning of the clip.
                        if ii == max_ii - 1:
                            last_frame = True

                        #                x: input images (N consecutive frames from M different sequences)
                        #                y_mask: ground truth annotations (some of them are zeros to have a fixed length in number of object instances)
                        #                sw_mask: this mask indicates which masks from y_mask are valid
                        x, y_mask, sw_mask = batch_to_var(
                            args, inputs[ii], targets[ii])

                        if ii == 0:
                            prev_mask = y_mask

                        # From one frame to the following frame the prev_hidden_temporal_list is updated.
                        loss, losses, outs, hidden_temporal_list = runIter(
                            args, encoder, decoder, x, y_mask, sw_mask,
                            resolution, crits, optims, split, loss,
                            prev_hidden_temporal_list, prev_mask, last_frame)

                        # Hidden temporal state from time instant ii is saved to be used when processing next time instant ii+1
                        if not args.only_spatial:
                            prev_hidden_temporal_list = hidden_temporal_list

                        prev_mask = outs

                    # store loss values in dictionary separately
                    epoch_losses[split]['total'].append(losses[0])
                    epoch_losses[split]['iou'].append(losses[1])

                    # print after some iterations
                    if (batch_idx + 1) % args.print_every == 0:

                        mt = np.mean(epoch_losses[split]['total'])
                        mi = np.mean(epoch_losses[split]['iou'])

                        te = time.time() - start
                        print("iter %d:\ttotal:%.4f\tiou:%.4f\ttime:%.4f" %
                              (batch_idx, mt, mi, te))
                        if args.use_gpu:
                            torch.cuda.synchronize()
                        start = time.time()

            num_batches[split] = batch_idx + 1
            # compute mean val losses within epoch

            if split == 'val' and args.smooth_curves:
                if mt_val == -1:
                    mt = np.mean(epoch_losses[split]['total'])
                else:
                    mt = 0.9 * mt_val + 0.1 * np.mean(
                        epoch_losses[split]['total'])
                mt_val = mt

            else:
                mt = np.mean(epoch_losses[split]['total'])

            mi = np.mean(epoch_losses[split]['iou'])

            # save train and val losses for the epoch
            total_losses['iou'].append(mi)
            total_losses['total'].append(mt)

            args.epoch_resume = e + epoch_resume

            print("Epoch %d:\ttotal:%.4f\tiou:%.4f\t(%s)" % (e, mt, mi, split))

        if mt < (best_val_loss - args.min_delta):
            print("Saving checkpoint.")
            best_val_loss = mt
            args.best_val_loss = best_val_loss
            # saves model, params, and optimizers
            save_checkpoint_prev_inference_mask(args, encoder, decoder,
                                                enc_opt, dec_opt)
            acc_patience = 0
        else:
            acc_patience += 1

        if acc_patience > args.patience and not args.update_encoder and args.finetune_after != -1:
            print("Starting to update encoder")
            acc_patience = 0
            args.update_encoder = True
            best_val_loss = 1000  # reset because adding a loss term will increase the total value
            mt_val = -1
            encoder_dict, decoder_dict, enc_opt_dict, dec_opt_dict, _ = load_checkpoint(
                args.model_name, args.use_gpu)
            encoder.load_state_dict(encoder_dict)
            decoder.load_state_dict(decoder_dict)
            enc_opt.load_state_dict(enc_opt_dict)
            dec_opt.load_state_dict(dec_opt_dict)

        # early stopping after N epochs without improvement
        if acc_patience > args.patience_stop:
            break
Example #24
    def run_eval(self):
        print("Dataset is %s" % (self.dataset))
        print("Split is %s" % (self.split))

        if args.overlay_masks:

            colors = []
            palette = sequence_palette()
            inv_palette = {}
            for k, v in palette.items():
                inv_palette[v] = k
            num_colors = len(inv_palette.keys())
            for id_color in range(num_colors):
                if id_color == 0 or id_color == 21:
                    continue
                c = inv_palette[id_color]
                colors.append(c)

        if self.split == 'val':

            if args.dataset == 'youtube':

                masks_sep_dir = os.path.join('../models', args.model_name,
                                             'masks_sep_2assess')
                make_dir(masks_sep_dir)
                if args.overlay_masks:
                    results_dir = os.path.join('../models', args.model_name,
                                               'results')
                    make_dir(results_dir)

                with open('../../databases/YouTubeVOS/train/'
                          'train-val-meta.json') as json_data:
                    data = json.load(json_data)

            else:

                masks_sep_dir = os.path.join('../models', args.model_name,
                                             'masks_sep_2assess-davis')
                make_dir(masks_sep_dir)
                if args.overlay_masks:
                    results_dir = os.path.join('../models', args.model_name,
                                               'results-davis')
                    make_dir(results_dir)

            for batch_idx, (inputs, targets, seq_name,
                            starting_frame) in enumerate(self.loader):

                prev_hidden_temporal_list = None
                max_ii = min(len(inputs), args.length_clip)

                base_dir_masks_sep = masks_sep_dir + '/' + seq_name[0] + '/'
                make_dir(base_dir_masks_sep)

                if args.overlay_masks:
                    base_dir = results_dir + '/' + seq_name[0] + '/'
                    make_dir(base_dir)

                for ii in range(max_ii):

                    #                x: input images (N consecutive frames from M different sequences)
                    #                y_mask: ground truth annotations (some of them are zeros to have a fixed length in number of object instances)
                    #                sw_mask: this mask indicates which masks from y_mask are valid
                    x, y_mask, sw_mask = batch_to_var(args, inputs[ii],
                                                      targets[ii])

                    print(seq_name[0] + '/' + '%05d' %
                          (starting_frame[0] + ii))

                    #from one frame to the following frame the prev_hidden_temporal_list is updated.
                    outs, hidden_temporal_list = test(
                        args, self.encoder, self.decoder, x,
                        prev_hidden_temporal_list)

                    if args.dataset == 'youtube':
                        num_instances = len(
                            data['videos'][seq_name[0]]['objects'])
                    else:
                        num_instances = 1  #int(torch.sum(sw_mask.data).data.cpu().numpy())

                    x_tmp = x.data.cpu().numpy()
                    height = x_tmp.shape[-2]
                    width = x_tmp.shape[-1]
                    for t in range(10):
                        mask_pred = (torch.squeeze(outs[0,
                                                        t, :])).cpu().numpy()
                        mask_pred = np.reshape(mask_pred, (height, width))
                        indxs_instance = np.where(mask_pred > 0.5)
                        mask2assess = np.zeros((height, width))
                        mask2assess[indxs_instance] = 255
                        toimage(mask2assess, cmin=0,
                                cmax=255).save(base_dir_masks_sep +
                                               '%05d_instance_%02d.png' %
                                               (starting_frame[0] + ii, t))

                    if args.overlay_masks:

                        frame_img = x.data.cpu().numpy()[0, :, :, :].squeeze()
                        frame_img = np.transpose(frame_img, (1, 2, 0))
                        mean = np.array([0.485, 0.456, 0.406])
                        std = np.array([0.229, 0.224, 0.225])
                        frame_img = std * frame_img + mean
                        frame_img = np.clip(frame_img, 0, 1)
                        plt.figure()
                        plt.axis('off')
                        plt.imshow(frame_img)

                        for t in range(num_instances):
                            mask_pred = (torch.squeeze(
                                outs[0, t, :])).cpu().numpy()
                            mask_pred = np.reshape(mask_pred, (height, width))
                            ax = plt.gca()
                            tmp_img = np.ones(
                                (mask_pred.shape[0], mask_pred.shape[1], 3))
                            color_mask = np.array(colors[t]) / 255.0
                            for i in range(3):
                                tmp_img[:, :, i] = color_mask[i]
                            ax.imshow(np.dstack((tmp_img, mask_pred * 0.7)))

                        figname = base_dir + 'frame_%02d.png' % (
                            starting_frame[0] + ii)
                        plt.savefig(figname, bbox_inches='tight')
                        plt.close()

                    if self.video_mode:
                        prev_hidden_temporal_list = hidden_temporal_list

        else:

            if args.dataset == 'youtube':

                masks_sep_dir = os.path.join('../models', args.model_name,
                                             'masks_sep_2assess_val')
                make_dir(masks_sep_dir)
                if args.overlay_masks:
                    results_dir = os.path.join('../models', args.model_name,
                                               'results_val')
                    make_dir(results_dir)

                with open('../../databases/YouTubeVOS/val/meta.json') as json_data:
                    data = json.load(json_data)

            else:

                masks_sep_dir = os.path.join('../models', args.model_name,
                                             'masks_sep_2assess_val_davis')
                make_dir(masks_sep_dir)
                if args.overlay_masks:
                    results_dir = os.path.join('../models', args.model_name,
                                               'results_val_davis')
                    make_dir(results_dir)

            for batch_idx, (inputs, seq_name,
                            starting_frame) in enumerate(self.loader):

                prev_hidden_temporal_list = None
                max_ii = min(len(inputs), args.length_clip)

                for ii in range(max_ii):

                    #                x: input images (N consecutive frames from M different sequences)
                    x = batch_to_var_test(args, inputs[ii])

                    print(seq_name[0] + '/' + '%05d' %
                          (starting_frame[0] + ii))

                    if ii == 0:

                        if args.dataset == 'youtube':

                            num_instances = len(
                                data['videos'][seq_name[0]]['objects'])

                        else:

                            annotation = Image.open(
                                '../../databases/DAVIS2017/Annotations/480p/' +
                                seq_name[0] + '/00000.png')
                            instance_ids = sorted(np.unique(annotation))
                            instance_ids = instance_ids if instance_ids[
                                0] else instance_ids[1:]
                            if len(instance_ids) > 0:
                                instance_ids = instance_ids[:-1] if instance_ids[
                                    -1] == 255 else instance_ids
                            num_instances = len(instance_ids)

                    #from one frame to the following frame the prev_hidden_temporal_list is updated.
                    outs, hidden_temporal_list = test(
                        args, self.encoder, self.decoder, x,
                        prev_hidden_temporal_list)

                    base_dir_masks_sep = masks_sep_dir + '/' + seq_name[0] + '/'
                    make_dir(base_dir_masks_sep)

                    if args.overlay_masks:
                        base_dir = results_dir + '/' + seq_name[0] + '/'
                        make_dir(base_dir)

                    x_tmp = x.data.cpu().numpy()
                    height = x_tmp.shape[-2]
                    width = x_tmp.shape[-1]
                    for t in range(10):
                        mask_pred = (torch.squeeze(outs[0,
                                                        t, :])).cpu().numpy()
                        mask_pred = np.reshape(mask_pred, (height, width))
                        indxs_instance = np.where(mask_pred > 0.5)
                        mask2assess = np.zeros((height, width))
                        mask2assess[indxs_instance] = 255
                        toimage(mask2assess, cmin=0,
                                cmax=255).save(base_dir_masks_sep +
                                               '%05d_instance_%02d.png' %
                                               (starting_frame[0] + ii, t))

                    if args.overlay_masks:

                        frame_img = x.data.cpu().numpy()[0, :, :, :].squeeze()
                        frame_img = np.transpose(frame_img, (1, 2, 0))
                        mean = np.array([0.485, 0.456, 0.406])
                        std = np.array([0.229, 0.224, 0.225])
                        frame_img = std * frame_img + mean
                        frame_img = np.clip(frame_img, 0, 1)
                        plt.figure()
                        plt.axis('off')
                        plt.imshow(frame_img)

                        for t in range(num_instances):

                            mask_pred = (torch.squeeze(
                                outs[0, t, :])).cpu().numpy()
                            mask_pred = np.reshape(mask_pred, (height, width))
                            ax = plt.gca()
                            tmp_img = np.ones(
                                (mask_pred.shape[0], mask_pred.shape[1], 3))
                            color_mask = np.array(colors[t]) / 255.0
                            for i in range(3):
                                tmp_img[:, :, i] = color_mask[i]
                            ax.imshow(np.dstack((tmp_img, mask_pred * 0.7)))

                        figname = base_dir + 'frame_%02d.png' % (
                            starting_frame[0] + ii)
                        plt.savefig(figname, bbox_inches='tight')
                        plt.close()

                    if self.video_mode:
                        prev_hidden_temporal_list = hidden_temporal_list
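The overlay branch above undoes the ImageNet normalization before painting each predicted mask as a translucent colored layer. Below is a minimal, self-contained sketch of the same idea; the frame and mask arrays are hypothetical stand-ins for x[0] and outs[0, t], and the mean/std values are the ImageNet statistics used above.

import numpy as np
import matplotlib.pyplot as plt

# hypothetical inputs: a normalized CHW frame and one soft instance mask
frame = np.random.randn(3, 480, 854).astype(np.float32)
mask = np.random.rand(480, 854)

# undo the ImageNet normalization (matplotlib expects HWC layout)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
img = np.clip(std * np.transpose(frame, (1, 2, 0)) + mean, 0, 1)

# paint the mask as a translucent single-color RGBA layer on top
color = np.array([255, 0, 0]) / 255.0
layer = np.ones((*mask.shape, 3)) * color
plt.figure()
plt.axis('off')
plt.imshow(img)
plt.gca().imshow(np.dstack((layer, mask * 0.7)))
plt.savefig('overlay.png', bbox_inches='tight')
plt.close()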
Example #25
import torch
from torch.utils.tensorboard import SummaryWriter
from losses import maskedNLL, maskedMSE
from utils.utils import Settings, get_dataset, get_net, make_dir
# Ranger is only needed when args.optimizer == 'Ranger'; assumed to come from
# the pytorch-ranger package
from ranger import Ranger

args = Settings()

def lr_scheduler(optim, it):
    # linear warm-up over the first 10 iterations, a constant plateau until
    # iteration 30, then a 30/it decay ('it' avoids shadowing the builtin iter)
    if it < 10:
        optim.param_groups[0]['lr'] = args.lr / 10 * it
    elif it > 30:
        optim.param_groups[0]['lr'] = args.lr * (30 / it)
    else:
        optim.param_groups[0]['lr'] = args.lr

make_dir(args.log_path + 'unique_object/' + args.model_type + '/')
make_dir(args.models_path + 'unique_object/' + args.model_type + '/')
logger = SummaryWriter(args.log_path + 'unique_object/' + args.model_type + '/' + args.name)

# logger.add_hparams(args.get_dict(), {})

trSet, valSet = get_dataset()

net = get_net()

if args.optimizer == 'Ranger':
    optimizer = Ranger(net.parameters(), lr=args.lr, alpha=0.5, k=5)
elif args.optimizer == 'Adam':
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
else:
    optimizer = torch.optim.SGD(net.parameters(), lr=args.lr)
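For reference, lr_scheduler implements a linear warm-up over the first 10 iterations, a constant plateau up to iteration 30, and a 30/it decay afterwards. A quick sketch of the resulting values, with a dummy parameter and a hypothetical base rate standing in for args.lr:

import torch

base_lr = 1e-3  # stands in for args.lr
param = torch.nn.Parameter(torch.zeros(1))
optim = torch.optim.SGD([param], lr=base_lr)

def lr_at(it):
    if it < 10:
        return base_lr / 10 * it      # warm-up: 0 -> base_lr
    if it > 30:
        return base_lr * (30 / it)    # decay: base_lr * 30/it
    return base_lr                    # plateau

for it in [0, 5, 10, 30, 60, 300]:
    optim.param_groups[0]['lr'] = lr_at(it)
    print(it, optim.param_groups[0]['lr'])
# 0 0.0, 5 0.0005, 10 0.001, 30 0.001, 60 0.0005, 300 0.0001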
Example #26
def main(args):

    # make the export folder structure
    # this is made here because the Logger uses the filename
    if args.do_save:
        # make a base save directory
        utils.make_dir(args.save_dir)

        # make a directory in the base save directory with for the specific
        # method.
        save_subdir = os.path.join(args.save_dir,
                                   args.dataset + "_" + args.sampling_method)
        utils.make_dir(save_subdir)

        filename = os.path.join(
            save_subdir,
            "log-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt")
        sys.stdout = utils.Logger(filename)

    # confusion argument can have multiple values
    confusions = [float(t) for t in args.confusions.split(" ")]
    mixtures = [float(t) for t in args.active_sampling_percentage.split(" ")]
    max_dataset_size = None if args.max_dataset_size == 0 else args.max_dataset_size
    starting_seed = args.seed

    # get the dataset from file based on the data directory and dataset name
    X, y = utils.get_mldata(args.data_dir, args.dataset)

    # object to store the results in
    all_results = {}

    # percentage of labels to randomize
    for c in confusions:

        # mixture weights on active sampling
        for m in mixtures:

            # the number of curves created during multiple trials
            for seed in range(starting_seed, starting_seed + args.trials):

                # get the sampler based on the name
                # returns a python object
                # also named: query strategy
                sampler = get_AL_sampler(args.sampling_method)

                # get the model
                score_model = utils.get_model(args.score_method, seed)

                # use a separate selection model only when it differs from the
                # scoring model
                if (args.select_method == "None"
                        or args.select_method == args.score_method):
                    select_model = None
                else:
                    select_model = utils.get_model(args.select_method, seed)

                # create the learning curve
                results, sampler_state = generate_one_curve(
                    X,
                    y,
                    sampler,
                    score_model,
                    seed,
                    args.warmstart_size,
                    args.batch_size,
                    select_model,
                    confusion=c,
                    active_p=m,
                    max_points=max_dataset_size,
                    standardize_data=args.standardize_data,
                    norm_data=args.normalize_data,
                    train_horizon=args.train_horizon)
                key = (args.dataset, args.sampling_method, args.score_method,
                       args.select_method, m, args.warmstart_size,
                       args.batch_size, c, args.standardize_data,
                       args.normalize_data, seed)
                sampler_output = sampler_state.to_dict()
                results["sampler_output"] = sampler_output
                all_results[key] = results

    # record the field order of the tuple keys so results can be decoded later
    fields = [
        "dataset", "sampler", "score_method", "select_method",
        "active percentage", "warmstart size", "batch size", "confusion",
        "standardize", "normalize", "seed"
    ]
    all_results["tuple_keys"] = fields

    # write the results to a file
    if args.do_save:

        # format the filename
        filename = "results_score_{}_select_{}_norm_{}_stand_{}".format(
            args.score_method, args.select_method, args.normalize_data,
            args.standardize_data)

        existing_files = gfile.Glob(
            os.path.join(save_subdir, "{}*.pkl".format(filename)))
        # the index suffix str(1000 + n)[1:] zero-pads to three digits: "000", "001", ...
        filepath = os.path.join(
            save_subdir, "{}_{}.pkl".format(filename,
                                            str(1000 + len(existing_files))[1:]))

        # dump the dict to a pickle file
        pickle.dump(all_results, gfile.GFile(filepath, "w"))

        # flush stdout (redirected to the Logger above when do_save is set)
        sys.stdout.flush_file()
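The pickle written above maps tuple keys to per-run result dicts, with the field order stored under "tuple_keys". A sketch of reading such a file back with the standard library (the filename is hypothetical; the original writes through gfile, but plain pickle can parse the same bytes):

import pickle

with open('results_score_logistic_select_None_norm_False_stand_True_000.pkl', 'rb') as f:
    all_results = pickle.load(f)

fields = all_results.pop('tuple_keys')
for key, results in all_results.items():
    run = dict(zip(fields, key))  # e.g. run['sampler'], run['seed'], ...
    print(run['dataset'], run['sampler'], run['seed'])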
Example #27
    def create_figures(self):

        acc_samples = 0
        results_dir = os.path.join('../models', args.model_name,
                                   args.model_name + '_results')

        make_dir(results_dir)
        masks_dir = args.model_name + '_masks'
        abs_masks_dir = os.path.join(results_dir, masks_dir)
        make_dir(abs_masks_dir)
        print "Creating annotations for cityscapes validation..."
        for batch_idx, (inputs, targets) in enumerate(self.loader):
            x, y_mask, y_class, sw_mask, sw_class = batch_to_var(
                self.args, inputs, targets)
            out_masks, out_scores, stop_probs = test(self.args, self.encoder,
                                                     self.decoder, x)

            class_ids = [24, 25, 26, 27, 28, 31, 32, 33]

            for sample in range(self.batch_size):

                sample_idx = self.sample_list[sample + acc_samples]
                image_dir = sample_idx.split('.')[0] + '.png'
                im = scipy.misc.imread(image_dir)
                h = im.shape[0]
                w = im.shape[1]

                sample_idx = sample_idx.split('/')[-1].split('.')[0]

                results_file = open(
                    os.path.join(results_dir, sample_idx + '.txt'), 'w')
                img_masks = out_masks[sample]

                instance_id = 0

                class_scores = out_scores[sample]
                stop_scores = stop_probs[sample]

                for time_step in range(self.T):
                    mask = img_masks[time_step].cpu().numpy()
                    mask = (mask > args.mask_th)

                    h_mask = mask.shape[0]
                    w_mask = mask.shape[1]

                    mask = (mask > 0)
                    labeled_blobs = measure.label(mask, background=0).flatten()

                    # find the biggest connected component (label 0 is background)
                    count = Counter(labeled_blobs)
                    max_num = 0
                    max_label = 0
                    for v, k in count.items():
                        if v == 0:
                            continue
                        if k > max_num:
                            max_num = k
                            max_label = v
                    # build mask from the largest connected component
                    segmentation = (labeled_blobs == max_label).astype("uint8")
                    mask = segmentation.reshape([h_mask, w_mask]) * 255

                    mask = scipy.misc.imresize(mask, [h, w])
                    class_scores_mask = class_scores[time_step].cpu().numpy()
                    stop_scores_mask = stop_scores[time_step].cpu().numpy()
                    class_score = np.argmax(class_scores_mask)

                    for i in range(len(class_scores_mask) - 1):
                        name_instance = sample_idx + '_' + str(
                            instance_id) + '.png'
                        pred_class_score = class_scores_mask[i + 1]
                        objectness = stop_scores_mask[0]
                        pred_class_score *= objectness
                        scipy.misc.imsave(
                            os.path.join(abs_masks_dir, name_instance), mask)
                        results_file.write(masks_dir + '/' + name_instance +
                                           ' ' + str(class_ids[i]) + ' ' +
                                           str(pred_class_score) + '\n')
                        instance_id += 1

                results_file.close()

            acc_samples += self.batch_size
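The Counter loop above keeps only the largest connected component of each thresholded mask. The same step can be written more compactly with np.bincount; a small sketch under the same assumptions (binary input mask, background label 0):

import numpy as np
from skimage import measure

mask = np.zeros((8, 8), dtype=bool)
mask[1:3, 1:3] = True   # 4-pixel blob
mask[5:8, 5:8] = True   # 9-pixel blob (the one we want to keep)

labels = measure.label(mask, background=0)
counts = np.bincount(labels.ravel())
counts[0] = 0                      # ignore the background label
max_label = np.argmax(counts)      # label of the largest blob
largest = (labels == max_label).astype('uint8')
print(largest.sum())               # 9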
Example #28
def save_seq_results(model_name, seq_name, prev_mask):

    results_dir = os.path.join('../models', model_name,
                               'masks_sep_2assess_val', seq_name)
    submission_dir = os.path.join('../models', model_name, 'Annotations',
                                  seq_name)
    make_dir(submission_dir)
    json_data = open('../../databases/YouTubeVOS/val/meta.json')
    data = json.load(json_data)
    seq_data = data['videos'][seq_name]['objects']
    prev_obj_ids = []
    prev_assignment = []
    images_dir = os.path.join('../../databases/YouTubeVOS/val/JPEGImages/',
                              seq_name)
    image_names = os.listdir(images_dir)
    image_names.sort()
    starting_img_name = image_names[0]
    starting_frame = int(starting_img_name[:-4])
    frame_names = []
    # track the smallest and largest object ids present in the sequence
    min_obj_id = 5
    max_obj_id = 1
    for obj_id in seq_data.keys():
        if int(obj_id) < min_obj_id:
            min_obj_id = int(obj_id)
        if int(obj_id) > max_obj_id:
            max_obj_id = int(obj_id)
        for frame_name in seq_data[obj_id]['frames']:
            if frame_name not in frame_names:
                frame_names.append(frame_name)

    frame_names.sort()
    frame_idx = 0
    obj_ids_sorted_increasing_jaccard = []

    for frame_name in frame_names:
        obj_ids = []
        for obj_id in seq_data.keys():
            if frame_name in seq_data[obj_id]['frames']:
                obj_ids.append(int(obj_id))

        if frame_idx == 0:

            annotation = np.array(
                Image.open('../../databases/YouTubeVOS/val/Annotations/' +
                           seq_name + '/' + frame_name + '.png'))

            res_im = Image.fromarray(annotation, mode="P")
            res_im.putpalette(PALETTE)
            res_im.save(submission_dir + '/' + frame_name + '.png')

            # compute the assignment between first-frame predictions and first-frame ground truth
            if prev_mask:
                num_preds = max_obj_id - min_obj_id + 1
            else:
                num_preds = 10
            cost = np.ones((max_obj_id - min_obj_id + 1, num_preds))
            for obj_id in obj_ids:
                annotation_obj = np.zeros(annotation.shape)
                annotation_obj[annotation == obj_id] = 1
                for pred_id in range(num_preds):
                    if prev_mask:
                        pred_mask = imread(results_dir + '/' + frame_name +
                                           '_instance_%02d.png' % pred_id)
                    else:
                        pred_mask = imread(
                            results_dir + '/' + '%05d_instance_%02d.png' %
                            (starting_frame + frame_idx, pred_id))
                    pred_mask_resized = imresize(pred_mask,
                                                 annotation.shape,
                                                 interp='nearest')
                    cost[obj_id - min_obj_id, pred_id] = 1 - jaccard_simple(
                        annotation_obj, pred_mask_resized)

            row_ind, col_ind = linear_sum_assignment(cost)
            prev_assignment = col_ind
            prev_obj_ids = obj_ids

            cost_objs = {}
            for obj_id in obj_ids:
                cost_objs[obj_id] = cost[obj_id - min_obj_id,
                                         prev_assignment[obj_id - min_obj_id]]
            obj_ids_sorted_increasing_jaccard = sorted(cost_objs.items(),
                                                       key=lambda kv: kv[1],
                                                       reverse=True)

        else:

            new_elems = []
            for obj_id in obj_ids:
                if obj_id not in prev_obj_ids:
                    new_elems.append(obj_id)

            pred_mask_resized = np.zeros(annotation.shape, dtype=np.uint8)

            if len(new_elems) == 0:

                for obj_id, jaccard_val in obj_ids_sorted_increasing_jaccard:
                    instance_assigned_id = prev_assignment[obj_id - min_obj_id]
                    if prev_mask:
                        pred_mask = imread(results_dir + '/' + frame_name +
                                           '_instance_%02d.png' %
                                           instance_assigned_id)
                    else:
                        pred_mask = imread(
                            results_dir + '/' + '%05d_instance_%02d.png' %
                            (starting_frame + frame_idx, instance_assigned_id))

                    pred_mask_resized_aux = imresize(pred_mask,
                                                     annotation.shape,
                                                     interp='nearest')
                    pred_mask_resized[pred_mask_resized_aux == 255] = obj_id

                res_im = Image.fromarray(pred_mask_resized, mode="P")
                res_im.putpalette(PALETTE)
                res_im.save(submission_dir + '/' + frame_name + '.png')

            else:

                prev_cost_objs = cost_objs
                cost_objs = {}

                annotation = np.array(
                    Image.open('../../databases/YouTubeVOS/val/Annotations/' +
                               seq_name + '/' + frame_name + '.png'))

                if prev_mask:
                    num_preds = max_obj_id - min_obj_id + 1
                else:
                    num_preds = 10
                cost = np.ones((max_obj_id - min_obj_id + 1, num_preds))
                for obj_id in obj_ids:
                    if obj_id in prev_obj_ids:
                        cost[obj_id - min_obj_id,
                             prev_assignment[obj_id - min_obj_id]] = 0
                    else:
                        annotation_obj = np.zeros(annotation.shape)
                        annotation_obj[annotation == obj_id] = 1
                        for pred_id in range(num_preds):
                            if prev_mask:
                                pred_mask = imread(results_dir + '/' +
                                                   frame_name +
                                                   '_instance_%02d.png' %
                                                   pred_id)
                            else:
                                pred_mask = imread(
                                    results_dir + '/' +
                                    '%05d_instance_%02d.png' %
                                    (starting_frame + frame_idx, pred_id))
                            pred_mask_resized = imresize(pred_mask,
                                                         annotation.shape,
                                                         interp='nearest')
                            cost[obj_id - min_obj_id,
                                 pred_id] = 1 - jaccard_simple(
                                     annotation_obj, pred_mask_resized)

                row_ind, col_ind = linear_sum_assignment(cost)
                prev_assignment = col_ind

                for obj_id in obj_ids:
                    if obj_id in prev_obj_ids:
                        cost_objs[obj_id] = prev_cost_objs[obj_id]
                    else:
                        cost_objs[obj_id] = cost[obj_id - min_obj_id,
                                                 prev_assignment[obj_id -
                                                                 min_obj_id]]

                obj_ids_sorted_increasing_jaccard = sorted(
                    cost_objs.items(), key=lambda kv: kv[1], reverse=True)

                pred_mask_resized = np.zeros(annotation.shape, dtype=np.uint8)

                for obj_id, jaccard_val in obj_ids_sorted_increasing_jaccard:
                    if obj_id in prev_obj_ids:

                        instance_assigned_id = prev_assignment[obj_id -
                                                               min_obj_id]
                        if prev_mask:
                            pred_mask = imread(results_dir + '/' + frame_name +
                                               '_instance_%02d.png' %
                                               instance_assigned_id)
                        else:
                            pred_mask = imread(results_dir + '/' +
                                               '%05d_instance_%02d.png' %
                                               (starting_frame + frame_idx,
                                                instance_assigned_id))
                        pred_mask_resized_aux = imresize(pred_mask,
                                                         annotation.shape,
                                                         interp='nearest')
                        pred_mask_resized[pred_mask_resized_aux ==
                                          255] = obj_id

                for obj_id in obj_ids:
                    if obj_id not in prev_obj_ids:
                        annotation_obj = np.zeros(annotation.shape)
                        annotation_obj[annotation == obj_id] = 255
                        pred_mask_resized[annotation_obj == 255] = obj_id

                res_im = Image.fromarray(pred_mask_resized, mode="P")
                res_im.putpalette(PALETTE)
                res_im.save(submission_dir + '/' + frame_name + '.png')
                prev_obj_ids = obj_ids

        frame_idx = frame_idx + 1
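The core of the matching above is a Hungarian assignment over a cost matrix of 1 - Jaccard overlaps between ground-truth objects and predicted instance masks. A toy sketch of that step; jaccard_simple is reimplemented here under the usual intersection-over-union definition:

import numpy as np
from scipy.optimize import linear_sum_assignment

def jaccard_simple(a, b):
    # intersection over union of two binary masks
    a, b = a.astype(bool), b.astype(bool)
    union = np.logical_or(a, b).sum()
    return np.logical_and(a, b).sum() / union if union else 0.0

# two ground-truth objects vs. three predicted instance masks
gt = [np.zeros((4, 4)) for _ in range(2)]
gt[0][:2, :2] = 1
gt[1][2:, 2:] = 1
preds = [np.zeros((4, 4)) for _ in range(3)]
preds[0][2:, 2:] = 1          # matches gt[1]
preds[1][:2, :2] = 1          # matches gt[0]

cost = np.ones((2, 3))
for i, g in enumerate(gt):
    for j, p in enumerate(preds):
        cost[i, j] = 1 - jaccard_simple(g, p)

row_ind, col_ind = linear_sum_assignment(cost)
print(col_ind)  # [1 0]: gt object 0 -> prediction 1, gt object 1 -> prediction 0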
Example #29
def save_network(model, episode, out_dir):
    out_dir = '{}/models'.format(out_dir)
    # Make Dir
    make_dir(out_dir)
    # Save model
    torch.save(model.state_dict(), '{}/episode_{}'.format(out_dir, episode))
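Restoring a checkpoint written this way is symmetric. A minimal sketch, assuming the same naming scheme and an already-constructed model of the matching architecture:

import torch

def load_network(model, episode, out_dir):
    # load weights saved by save_network into an existing model instance
    path = '{}/models/episode_{}'.format(out_dir, episode)
    model.load_state_dict(torch.load(path, map_location='cpu'))
    return model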
Example #30
        assert args.load_epoch is not None, 'The epoch to load weights from is not specified!'
        outpath = args.load_modelPath
        if torch.cuda.is_available():
            encoder.load_state_dict(torch.load(f'{outpath}/weights_encoder/epoch_{args.load_epoch}_encoder_weights.pth'))
            decoder.load_state_dict(torch.load(f'{outpath}/weights_decoder/epoch_{args.load_epoch}_decoder_weights.pth'))
        else:
            encoder.load_state_dict(torch.load(f'{outpath}/weights_encoder/epoch_{args.load_epoch}_encoder_weights.pth', map_location=torch.device('cpu')))
            decoder.load_state_dict(torch.load(f'{outpath}/weights_decoder/epoch_{args.load_epoch}_decoder_weights.pth', map_location=torch.device('cpu')))
    # Create new model
    else:
        outpath = f"{args.save_dir}/{gen_fname(args)}"

    if args.customSuffix is not None:
        outpath = f"{outpath}_{args.customSuffix}"

    make_dir(outpath)
    with open(f"{outpath}/args_cache.json", "w") as f:
        json.dump(vars(args), f)

    # Training
    optimizer_encoder = torch.optim.Adam(encoder.parameters(), args.lr)
    optimizer_decoder = torch.optim.Adam(decoder.parameters(), args.lr)
    train_avg_losses, valid_avg_losses, train_dts, valid_dts = train_loop(args, encoder, decoder, train_loader, valid_loader,
                                                                          optimizer_encoder, optimizer_decoder, outpath, device=device)

    # Plot evaluation results
    plot_eval_results(args, data=(train_avg_losses, valid_avg_losses), data_name="Losses", outpath=outpath)
    plot_eval_results(args, data=(train_dts, valid_dts), data_name="Time durations", outpath=outpath)
    plot_eval_results(args, data=[train_dts[i] + valid_dts[i] for i in range(len(train_dts))], data_name="Total time durations", outpath=outpath)

    print("Completed!")