Example #1
def init(self):
    self.summary_writer = SummaryWriter(self.tensorboard_save_dir)
    # Log to stdout only, or also to a file when one is configured.
    self.logger = (setup_logger() if self.log_file is None
                   else setup_logger(self.log_file))
    self.train_loader, self.val_loader = self.get_dataloader()
    self.model, self.train_loss_fn, self.optimizer = self.get_model_loss_optimizer()
    self.lrscher = LRScher(self.learning_rate,
                           self.num_epochs * len(self.train_loader),
                           self.lr_power, self.logger)
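
All of the examples on this page assume a setup_logger helper from each project's logger module. The helper itself is never shown, and its signature varies between projects (most pass an optional log file; Example #6 passes a name and a level). The following is a minimal, hypothetical sketch of such a helper built on the standard logging module, not any of these projects' actual implementations:

# Hypothetical setup_logger sketch; the real helpers in these projects may differ.
import logging
import sys

def setup_logger(logfile=None, name='root', level=logging.INFO):
    """Return a logger that writes to stdout and, optionally, to logfile."""
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:  # avoid adding duplicate handlers on repeated calls
        fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(fmt)
        logger.addHandler(stream_handler)
        if logfile is not None:
            file_handler = logging.FileHandler(logfile)
            file_handler.setFormatter(fmt)
            logger.addHandler(file_handler)
    return logger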
Example #2
def __init__(self, phase='test'):
    self.img_size = [256, 256]
    self.whole_model_path = "./snapshots/drsn_yvos_10w_davis_3p5w.pth"
    self.mask_local_home = "./result_davis_mask_local"
    self.mask_home = "./result_davis_mask"
    self.npy_home = "./result_davis_npy"
    self.phase = phase
    self.logger = setup_logger()
    # Fine-tuning hyper-parameters; left unset until fine-tuning is configured.
    self.ft_lr = None
    self.ft_iters = None
    self.ft_bs = None
Example #3
def main():
    exp_dir = TRAIN_CONFIG['exp_dir']
    if not os.path.exists(exp_dir):
        mkdir_p(exp_dir)
    #if len(sys.argv) == 2 and sys.argv[1] != '--resume':
    #    assert False, 'Experiment directory {} exists, overwriting is prohibited...\n\t**Use python3 train.py --resume to force training...'.format(exp_dir)
    # Start from a fresh log file unless resuming from a checkpoint.
    if os.path.exists(TRAIN_CONFIG['logfile']) and TRAIN_CONFIG['resume_checkpoint'] is None:
        os.remove(TRAIN_CONFIG['logfile'])
    logger = setup_logger(logfile=TRAIN_CONFIG['logfile'])
    # Keep a copy of the config file alongside the experiment for reproducibility.
    os.system('cp {} {}'.format(CONF_FN, osp.join(exp_dir, CONF_FN.split('/')[-1])))
    trainer = Trainer(logger)
    trainer.train()
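
The os.system('cp ...') shell-out above only works where a Unix cp command is available; a portable standard-library equivalent would be:

# Portable replacement for the cp shell-out: copy the config file into the
# experiment directory under its base name (CONF_FN and exp_dir as above).
import shutil
import os.path as osp

shutil.copy(CONF_FN, osp.join(exp_dir, osp.basename(CONF_FN)))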
Example #4
import sys
import os
import os.path as osp
import setproctitle
import importlib
import torch
from skimage import io
CURR_DIR = osp.dirname(__file__)
sys.path.append(osp.join(CURR_DIR, ".."))
from benchmark import vot
from logger.logger import setup_logger
from configuration import MODEL_CONFIG, TRACK_CONFIG
from inference.tracker import Tracker

setproctitle.setproctitle('VOT_BENCHMARK_TEST')
logger = setup_logger(logfile=None)

# LOAD MODEL #
model_fn = TRACK_CONFIG['model']
if os.path.exists(model_fn):
    # Build the network class dynamically from the configured model id.
    model = importlib.import_module(
        "models." + MODEL_CONFIG['model_id']).GONET(
            MODEL_CONFIG['pretrained_model_fn'], TRACK_CONFIG['use_gpu'])
    if TRACK_CONFIG['use_gpu']:
        model.load_state_dict(torch.load(model_fn))
    else:
        # Remap CUDA tensors onto the CPU when no GPU is available.
        model.load_state_dict(
            torch.load(model_fn, map_location=lambda storage, loc: storage))
    model.eval()
else:
    # The original snippet is truncated here; a minimal guard so the example runs.
    logger.error("Model file {} does not exist".format(model_fn))
    sys.exit(1)
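
The map_location lambda above is the classic way to force CPU loading; recent PyTorch versions also accept the simpler string form:

import torch

# Equivalent CPU remapping using the string shorthand accepted by torch.load
# (model_fn as defined in the example above).
state_dict = torch.load(model_fn, map_location='cpu')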
Example #5
def main():
    """Create the model and start the training."""
    args = get_arguments()
    start = time.time()
    logger = setup_logger()
    # Log TensorBoard data to the cluster's shared directory unless running on
    # the dedicated training host, which logs next to the model checkpoints.
    if os.environ['HOSTNAME'] != 'train119.hogpu.cc':
        writer = SummaryWriter(os.path.join("/job_tboard"))
    else:
        writer = SummaryWriter(args.model_save_path)
    logger.info(json.dumps(vars(args), indent=1))

    logger.info('Setting model...')
    model = DRSN()
    model.init(args.init_model_path, "yvos_train")
    model.train()
    model.float()
    model = torch.nn.DataParallel(model)
    model.cuda()
    #print(model)
    logger.info('Setting criterion...')
    criterion = Criterion2()  # For softmax
    criterion.cuda()
    #criterion = Criterion()         # For sigmoid
    #criterion = CriterionDataParallel(criterion)

    # Set CUDNN and GPU associated
    logger.info('Setting CUDNN...')
    cudnn.enabled = cudnn.benchmark = True
    os.makedirs(args.model_save_path, exist_ok=True)

    trainset = YVOSDataset(args.img_size)
    trainset.data_len = args.max_iters * args.batch_size
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=32,
                                  pin_memory=True)

    learning_rate = args.learning_rate[0]
    # Adam is better
    optimizer = optim.Adam(
        [{'params': filter(lambda p: p.requires_grad, model.parameters()),
          'lr': learning_rate}],
        lr=learning_rate, weight_decay=args.weight_decay)

    for i_iter, batch in enumerate(trainloader):
        (ref_images, ref_masks, p_images, p_masks, q_images, q_masks,
         pre_masks, images, masks) = batch
        ref_images = ref_images.float().cuda()
        p_images = p_images.float().cuda()
        q_images = q_images.float().cuda()
        images = images.float().cuda()
        ref_masks = ref_masks.float().cuda()
        p_masks = p_masks.float().cuda()
        q_masks = q_masks.float().cuda()
        pre_masks = pre_masks.float().cuda()
        masks = masks.long().cuda()
        # Concatenate each frame with its mask along the channel dimension.
        ref_imasks = torch.cat([ref_images, ref_masks], 1)
        p_imasks = torch.cat([p_images, p_masks], 1)
        q_imasks = torch.cat([q_images, q_masks], 1)
        n_imasks = torch.cat([images, pre_masks], 1)

        optimizer.zero_grad()
        #adjust_learning_rate(optimizer, i_iter, args)
        #preds = model(ref_rgb_mask, cand_rgb_mask0, cand_rgb_mask1, input_rgb_mask)
        preds = model(ref_imasks, p_imasks, q_imasks, n_imasks)

        loss = criterion(preds, masks)
        loss.backward()
        optimizer.step()
        loss = loss.data.cpu().numpy()

        # One-step LR decay: scale the learning rate by 0.1 at iteration decayat.
        if i_iter == args.decayat:
            optimizer.param_groups[0]['lr'] = learning_rate * 0.1

        if i_iter % 200 == 0:
            writer.add_scalar('MaskTrack_LearningRate',
                              optimizer.param_groups[0]['lr'], i_iter)
            writer.add_scalar('MaskTrack_Loss/TrainLoss', loss, i_iter)

        #if i_iter % 500 == 0:
        #    g_images_inv = inv_preprocess(ref_images, args.save_num_images, IMG_MEAN)
        #    images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
        #    g_labels_colors = decode_labels(ref_masks, args.save_num_images, 2)
        #    labels_colors = decode_labels(masks, args.save_num_images, 2)
        #    if isinstance(preds, list):
        #        preds = preds[0]
        #    preds_colors = decode_predictions(preds, args.save_num_images, 2)
        #    pre_masks_colors = decode_predictions(pre_masks, args.save_num_images, 2)
        #    for index, (img, lab) in enumerate(zip(images_inv, labels_colors)):
        #        writer.add_image('MaskTrack_CurImages/'+str(index), img, i_iter)
        #        writer.add_image('MaskTrack_CurLabels/'+str(index), lab, i_iter)
        #        writer.add_image('MaskTrack_RefImages/'+str(index), g_images_inv[index], i_iter)
        #        writer.add_image('MaskTrack_RefLabels/'+str(index), g_labels_colors[index], i_iter)
        #        writer.add_image('MaskTrack_CurPreds/'+str(index), preds_colors[index], i_iter)
        #        writer.add_image('MaskTrack_PreMasks/'+str(index), pre_masks_colors[index], i_iter)

        logger.info('Train iter {} of {} completed, loss = {}'.format(
            i_iter, args.max_iters, loss))

        if (i_iter + 1) % args.save_iters == 0 or i_iter >= args.max_iters - 1:
            snapshot_fn = osp.join(args.model_save_path,
                                   'drsn_' + str(i_iter + 1) + '.pth')
            logger.info("Snapshot {} dumped...".format(snapshot_fn))
            torch.save(model.state_dict(), snapshot_fn)

    end = time.time()
    total_h, total_m = sec2hm(end - start)
    logger.info('The whole training costs {}h {}m...'.format(total_h, total_m))
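
Examples #5 and #8 report elapsed time through a sec2hm helper that is not shown; a plausible implementation (an assumption, not these projects' actual code) is:

def sec2hm(seconds):
    """Split a duration in seconds into whole hours and remaining minutes."""
    hours, rem = divmod(int(seconds), 3600)
    return hours, rem // 60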
Example #6
import logging  # used below by logger.setup_logger('root', logging.DEBUG)
from typing import List  # used by the type annotation in build_with_existing

from database.sql_alchemy.serializer import create_artist_for_db, create_album_for_db, create_song_for_db
from database.sql_alchemy.declarative import *
from database.sql_alchemy.insert import insert_artist, insert_album, insert_song
from database.sql_alchemy.sa_utils import managedSession, initialize, get_sql_file
from genius.genius_api import *

from genius.models.song_lite import GeniusSongLite
from logger import logger
from main.get_data import get_all_artists, get_all_artist_albums
from music_brainz.get_data import get_artist_albums_from_mb, get_artist_data
from music_brainz.models.album import Album as Mb_Album
from utils.misc_utils import *
from main.build_data import get_songs_from_genius, get_artist_from_genius

LOGGER = logger.setup_logger('root', logging.DEBUG)


def build_with_existing(db_artist, genius_artist, mb_artist, valid_albums):
    albums: List[Album] = get_all_artist_albums(db_artist.id)

    new_albums = []

    # Keep only the albums that are not already stored for this artist.
    for va in valid_albums:
        exists = False
        for album in albums:
            if compare_strings(va.mb_data.title, album.name):
                exists = True
                break
        if not exists:
            new_albums.append(va)
Example #7
import numpy as np
import torch
import argparse
from sklearn.neighbors import NearestNeighbors
from PIL import Image
import os
import cv2
from net.loss import minTripletLoss
import torch.nn as nn
from net.utils import projection_for_visualization
from net.bs import BSWrapper
from multiprocessing.dummy import Pool as ThreadPool
import time
from logger.logger import setup_logger

logger = setup_logger()

image_dims = [854, 480]
reduced_image_dims = [image_dims[1] // 8 + 1, image_dims[0] // 8 + 1]
embedding_vector_dims = 128
K = 5
PARALLEL_BS = 10
# Note: 'global' at module scope is a no-op; these are placeholders for names
# assigned later at runtime.
global loss_func
global bs_wrapper
mean_value = np.array([122.675, 116.669, 104.008])


def interp(x, out_size=(854, 480)):
    """Bilinearly resize a NumPy array via PIL; out_size is (width, height)."""
    im = Image.fromarray(x)
    return np.asarray(im.resize(out_size, resample=Image.BILINEAR))
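
For context, a small usage sketch of interp, reusing the dimension constants defined above (the random input is a stand-in for a real network output):

# Hypothetical usage: upsample a low-resolution score map back to frame size.
prob_map = np.random.rand(*reduced_image_dims).astype(np.float32)
full_res = interp(prob_map, out_size=tuple(image_dims))
print(full_res.shape)  # (480, 854): PIL takes (width, height), NumPy returns (rows, cols)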
Example #8
def main():
    """Create the model and start the training."""
    # 'start' is used by the timing report at the end of this function but was
    # missing from the snippet; defined here to match the timeit call below.
    start = timeit.default_timer()
    logger = setup_logger()
    if os.environ['HOSTNAME'] != 'train119.hogpu.cc':
        writer = SummaryWriter(os.path.join("/job_tboard"))
    else:
        writer = SummaryWriter(args.model_save_path)
    logger.info(json.dumps(vars(args), indent=1))

    logger.info('Setting model...')
    model = DRSN()
    model.init(args.seg_model_path, stage='davis_train')
    model = torch.nn.DataParallel(model)
    model.train()
    model.float()
    #print(model)

    #model.eval() # use_global_stats = True
    # model = SelfDataParallel(model)
    # model.apply(set_bn_momentum)

    logger.info('Setting criterion...')
    criterion = Criterion2()  # For softmax
    #criterion = Criterion()         # For sigmoid
    #criterion = CriterionDataParallel(criterion)

    # Set CUDNN and GPU associated
    # gpu = args.gpu
    logger.info('Setting CUDNN...')
    cudnn.enabled = cudnn.benchmark = True
    model.cuda()
    criterion.cuda()

    os.makedirs(args.model_save_path, exist_ok=True)
    # Create the per-stage snapshot directories.
    for x in ['stage1', 'stage2']:
        os.makedirs(os.path.join(args.model_save_path, x), exist_ok=True)

    trainset_s1 = DAVISDataSet(split='train',
                               img_size=args.im_size,
                               stage='stage1')
    trainset_s1.data_len = args.steps_s1 * args.batch_size
    trainset_s2 = DAVISDataSet(split='train',
                               img_size=args.im_size,
                               stage='stage2')
    trainset_s2.data_len = args.steps_s2 * args.batch_size
    trainloader_s1 = data.DataLoader(trainset_s1,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=args.batch_size // 2 + 1,
                                     pin_memory=True)
    trainloader_s2 = data.DataLoader(trainset_s2,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=args.batch_size // 2 + 1,
                                     pin_memory=True)

    #valset = DAVISDataSet(split='val', img_size=args.im_size)
    #valset.data_len = args.max_iters * args.batch_size
    #validloader = data.DataLoader(valset, batch_size=args.batch_size, shuffle=True, num_workers=args.batch_size//2+1, pin_memory=True)

    lr_s1 = args.lr_s1
    lr_s2 = args.lr_s2
    # Adam is better
    optimizer_s1 = optim.Adam(
        [{'params': filter(lambda p: p.requires_grad, model.parameters()),
          'lr': lr_s1}],
        lr=lr_s1, weight_decay=args.weight_decay)
    optimizer_s2 = optim.Adam(
        [{'params': filter(lambda p: p.requires_grad, model.parameters()),
          'lr': lr_s2}],
        lr=lr_s2, weight_decay=args.weight_decay)

    start_iter = 0
    for i_iter, batch in enumerate(trainloader_s1, start_iter):
        (ref_images, ref_masks, p_images, p_masks, q_images, q_masks,
         pre_masks, images, masks) = batch
        ref_images = ref_images.float().cuda()
        p_images = p_images.float().cuda()
        q_images = q_images.float().cuda()
        images = images.float().cuda()
        ref_masks = ref_masks.float().cuda()
        p_masks = p_masks.float().cuda()
        q_masks = q_masks.float().cuda()
        pre_masks = pre_masks.float().cuda()
        masks = masks.long().cuda()
        ref_imasks = torch.cat([ref_images, ref_masks], 1)
        p_imasks = torch.cat([p_images, p_masks], 1)
        q_imasks = torch.cat([q_images, q_masks], 1)
        n_imasks = torch.cat([images, pre_masks], 1)

        optimizer_s1.zero_grad()
        preds = model(ref_imasks, p_imasks, q_imasks, n_imasks)

        loss = criterion(preds, masks)
        loss.backward()
        optimizer_s1.step()
        loss = loss.data.cpu().numpy()

        if i_iter % 100 == 0:
            writer.add_scalar('MaskTrack_Loss/TrainLoss', loss, i_iter)

        #if i_iter % 1000 == 0 and i_iter != start_iter:
        #    logger.info('Start to do evaluation...')
        #    valid_loss = validate(validloader, model, criterion, logger=logger)
        #    writer.add_scalar('MaskTrack_Loss/ValidLoss', valid_loss, i_iter)

        #if i_iter % 500 == 0:
        #    g_images_inv = inv_preprocess(g_images, args.save_num_images, IMG_MEAN)
        #    images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
        #    g_labels_colors = decode_labels(g_labels, args.save_num_images, 2)
        #    labels_colors = decode_labels(labels, args.save_num_images, 2)
        #    if isinstance(preds, list):
        #        preds = preds[0]
        #    # probs = nn.functional.sigmoid(preds)
        #    preds_colors = decode_predictions(preds, args.save_num_images, 2)
        #    pre_masks_colors = decode_predictions(pre_masks, args.save_num_images, 2)
        #    for index, (img, lab) in enumerate(zip(images_inv, labels_colors)):
        #        writer.add_image('MaskTrack_CurImages/'+str(index), img, i_iter)
        #        writer.add_image('MaskTrack_CurLabels/'+str(index), lab, i_iter)
        #        writer.add_image('MaskTrack_RefImages/'+str(index), g_images_inv[index], i_iter)
        #        writer.add_image('MaskTrack_RefLabels/'+str(index), g_labels_colors[index], i_iter)
        #        writer.add_image('MaskTrack_CurPreds/'+str(index), preds_colors[index], i_iter)
        #        writer.add_image('MaskTrack_PreMasks/'+str(index), pre_masks_colors[index], i_iter)

        logger.info(
            'Train adapt stage1 iter {} of {} completed, loss = {}'.format(
                i_iter, args.steps_s1, loss))

        if (i_iter + 1) % args.save_iters == 0 or i_iter >= args.steps_s1 - 1:
            snapshot_fn = osp.join(
                args.model_save_path,
                'stage1/drsn_davis_' + str(i_iter + 1) + '.pth')
            logger.info("Snapshot {} dumped...".format(snapshot_fn))
            torch.save(model.state_dict(), snapshot_fn)

    for i_iter, batch in enumerate(trainloader_s2, args.steps_s1):
        (ref_images, ref_masks, p_images, p_masks, q_images, q_masks,
         pre_masks, images, masks) = batch
        ref_images = ref_images.float().cuda()
        p_images = p_images.float().cuda()
        q_images = q_images.float().cuda()
        images = images.float().cuda()
        ref_masks = ref_masks.float().cuda()
        p_masks = p_masks.float().cuda()
        q_masks = q_masks.float().cuda()
        pre_masks = pre_masks.float().cuda()
        masks = masks.long().cuda()
        ref_imasks = torch.cat([ref_images, ref_masks], 1)
        p_imasks = torch.cat([p_images, p_masks], 1)
        q_imasks = torch.cat([q_images, q_masks], 1)
        n_imasks = torch.cat([images, pre_masks], 1)

        optimizer_s2.zero_grad()  # fix: stage 2 must zero optimizer_s2 (the original zeroed optimizer_s1)
        preds = model(ref_imasks, p_imasks, q_imasks, n_imasks)

        loss = criterion(preds, masks)
        loss.backward()
        optimizer_s2.step()
        loss = loss.data.cpu().numpy()

        if i_iter % 100 == 0:
            writer.add_scalar('MaskTrack_Loss/TrainLoss', loss, i_iter)

        #if i_iter % 1000 == 0 and i_iter != start_iter:
        #    logger.info('Start to do evaluation...')
        #    valid_loss = validate(validloader, model, criterion, logger=logger)
        #    writer.add_scalar('MaskTrack_Loss/ValidLoss', valid_loss, i_iter)

        #if i_iter % 500 == 0:
        #    g_images_inv = inv_preprocess(g_images, args.save_num_images, IMG_MEAN)
        #    images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
        #    g_labels_colors = decode_labels(g_labels, args.save_num_images, 2)
        #    labels_colors = decode_labels(labels, args.save_num_images, 2)
        #    if isinstance(preds, list):
        #        preds = preds[0]
        #    # probs = nn.functional.sigmoid(preds)
        #    preds_colors = decode_predictions(preds, args.save_num_images, 2)
        #    pre_masks_colors = decode_predictions(pre_masks, args.save_num_images, 2)
        #    for index, (img, lab) in enumerate(zip(images_inv, labels_colors)):
        #        writer.add_image('MaskTrack_CurImages/'+str(index), img, i_iter)
        #        writer.add_image('MaskTrack_CurLabels/'+str(index), lab, i_iter)
        #        writer.add_image('MaskTrack_RefImages/'+str(index), g_images_inv[index], i_iter)
        #        writer.add_image('MaskTrack_RefLabels/'+str(index), g_labels_colors[index], i_iter)
        #        writer.add_image('MaskTrack_CurPreds/'+str(index), preds_colors[index], i_iter)
        #        writer.add_image('MaskTrack_PreMasks/'+str(index), pre_masks_colors[index], i_iter)

        logger.info(
            'Train adapt stage2 iter {} of {} completed, loss = {}'.format(
                i_iter, args.steps_s2 + args.steps_s1, loss))

        if (i_iter + 1) % args.save_iters == 0 or i_iter >= args.steps_s2 + args.steps_s1 - 1:
            snapshot_fn = osp.join(
                args.model_save_path,
                'stage2/drsn_davis_' + str(i_iter + 1) + '.pth')
            logger.info("Snapshot {} dumped...".format(snapshot_fn))
            torch.save(model.state_dict(), snapshot_fn)

    end = timeit.default_timer()
    total_h, total_m = sec2hm(end - start)
    logger.info('The whole training costs {}h {}m...'.format(total_h, total_m))
Example #9
def main():
    logger = setup_logger(logfile=None)
    valider = Valider(logger)
    valider.valid()
Example #10
def train_rtmdnet():
    # Prepare dataset, model, optimizer, loss
    with open(pretrain_opts['vid_pkl'], "rb") as fp:
        data = pickle.load(fp)
    K = len(data)
    K = 2000  # override: cap training to the first 2000 videos
    print("VID has {} videos...".format(K))
    logger = setup_logger(
        logfile="./snapshots/train_{}cycle_{}video.log".format(
            pretrain_opts['n_cycles'], K))

    # Model
    mdnet = MDNet(K=K)
    mdnet.set_learnable_params(pretrain_opts['ft_layers'])
    mdnet.cuda()

    # https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/configs/e2e_mask_rcnn_R_50_FPN_1x.yaml
    # https://github.com/facebookresearch/maskrcnn-benchmark/blob/c56832ed8e05eb493c2e9ff8d8a8878a565223b9/maskrcnn_benchmark/modeling/poolers.py
    FPN_RoIAlign = Pooler(output_size=(7, 7),
                          scales=(0.25, 0.125, 0.0625, 0.03125),
                          sampling_ratio=2)
    # Optimizer
    binaryCriterion = BinaryLoss().cuda()
    evaluator = Precision()
    optimizer = set_optimizer(mdnet, pretrain_opts['lr'])

    # Data
    dataset = [None] * K
    print("Building dataset...")
    for k, (seq_name, seq) in enumerate(data.items()):
        img_list, gt = seq['images'], seq['gt']
        img_dir = os.path.join(pretrain_opts['vid_home'], seq_name)
        dataset[k] = VIDRegionDataset(img_dir, img_list, gt, pretrain_opts)
        if k >= K - 1: break

    best_score = 0
    batch_idx = 0
    precision = np.zeros(pretrain_opts['n_cycles'])
    for i in range(pretrain_opts['n_cycles']):
        print("==== Start Cycle {} ====".format(i))
        k_list = np.random.permutation(K)
        prec = np.zeros(K)

        for j, k in enumerate(k_list):
            tic = time.time()
            img_pool, box_pool = dataset[k].__next__()
            #dataset[k].DEBUG_imgbox(img_pool, box_pool, "debug")
            opn_feat, opn_rois = mdnet.forward_OPN(img_pool)
            opn_roi_feats = FPN_RoIAlign(opn_feat,
                                         opn_rois)  #[Bx1000, 256, 7, 7]
            pos_idx, neg_idx = sample_pos_neg_idxs(box_pool, opn_rois)

            # Skip this sequence when either positive or negative samples are missing.
            if pos_idx is None or neg_idx is None:
                continue

            pos_roi_feats = opn_roi_feats[pos_idx]
            neg_roi_feats = opn_roi_feats[neg_idx]

            pos_roi_feats = pos_roi_feats.view(pos_roi_feats.size(0), -1)
            neg_roi_feats = neg_roi_feats.view(neg_roi_feats.size(0), -1)

            # Compute score
            pos_score = mdnet(pos_roi_feats, k, in_layer='fc4')
            neg_score = mdnet(neg_roi_feats, k, in_layer='fc4')

            cls_loss = binaryCriterion(pos_score, neg_score)
            cls_loss.backward()
            batch_idx += 1

            if (batch_idx % pretrain_opts['seqbatch_size']) == 0:
                print("Update weights...")
                torch.nn.utils.clip_grad_norm_(mdnet.parameters(),
                                               pretrain_opts['grad_clip'])
                optimizer.step()
                optimizer.zero_grad()
                batch_idx = 0

            prec[k] = evaluator(pos_score, neg_score)

            toc = time.time() - tic
            if j % 10 == 0:
                print("Cycle %2d, K %2d (%2d), BinLoss %.3f, Prec %.3f, Time %.3f" %
                      (i, j, k, cls_loss.item(), prec[k], toc))

        cur_score = prec.mean()
        precision[i] = cur_score
        print("Mean Precision: %.3f " % (cur_score))
        if cur_score > best_score:
            best_score = cur_score
            states = {'fclayers': mdnet.fclayers.state_dict()}
            print("Save model to %s" % pretrain_opts['model_path'])
            torch.save(states, pretrain_opts['model_path'])
    np.savetxt("precision.txt", precision, fmt='%2.2f')
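
Example #10 accumulates gradients over seqbatch_size sequences before each optimizer step. The pattern in isolation, as a stripped-down sketch (model, loss_fn, and the batch iterable here are generic stand-ins, not this project's classes):

import torch

def train_with_accumulation(model, loss_fn, optimizer, batches,
                            accum_steps=4, grad_clip=10.0):
    """Backward on every batch; clip and step only every accum_steps batches."""
    optimizer.zero_grad()
    for idx, (inputs, targets) in enumerate(batches, 1):
        loss = loss_fn(model(inputs), targets)
        loss.backward()  # gradients accumulate across iterations
        if idx % accum_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
            optimizer.step()
            optimizer.zero_grad()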
Example #11
import itertools
import time
import setproctitle

from db.db import FFDB, AssistDB
from logger.logger import setup_logger


def delay(t=0):
    if not t:
        # Default: sleep round(3.6) seconds, throttling to roughly 1000 calls per hour.
        time.sleep(round(1 / (1000 / 60 / 60)))
    else:
        time.sleep(round(t))


logger = setup_logger(logfile=LOGFN)  # LOGFN is a module-level constant defined elsewhere in the project

""" FF's fetcher, it can pull ff's data into python types var official API """
class Fetcher:
    def __init__(self):
        self.get_auth()
        self.client = self.get_client()

    def restart(self):
        self.__init__()
    
    def get_auth(self):
        # AUTH is a credentials mapping defined elsewhere in the project.
        self.consumer_key = AUTH['consumer_key']
        self.consumer_sec = AUTH['consumer_sec']
        self.username = AUTH['username']
        self.password = AUTH['password']