Example #1
0
 def __init__(self, num_features):
     """Assemble the BSP-CDAN model: backbone -> bottleneck -> classifier.

     num_features: width of the backbone's output feature vector.
     """
     super(BSP_CDAN, self).__init__()
     # Backbone feature extractor.
     self.model_fc = model.FeatureExtractor(num_features)
     # 256-d bottleneck (linear + ReLU + dropout) for regularized features.
     bottleneck_fc = nn.Linear(num_features, 256)
     bottleneck_fc.apply(init_weights)
     self.bottleneck_layer1 = bottleneck_fc
     self.bottleneck_layer = nn.Sequential(bottleneck_fc, nn.ReLU(), nn.Dropout(0.5))
     # 4-way classification head on top of the bottleneck.
     head = nn.Linear(256, 4)
     head.apply(init_weights)
     self.classifier_layer = head
     # Convenience end-to-end path: backbone -> bottleneck -> classifier.
     self.predict_layer = nn.Sequential(self.model_fc, self.bottleneck_layer, self.classifier_layer)
Example #2
0
                   default='../../data/Kitti/training/image_2/',
                   help='RGB image directory')

args = parser.parse_args()
print(args)

# Fixed geometry settings and deterministic seeding for reproducibility.
strideNet = 8
Transform = outil.Homography
nbPoint = 4
torch.manual_seed(1000)
np.random.seed(1000)

## Loading model
# Define Networks
network = {
    'netFeatCoarse': model.FeatureExtractor(),
    'netCorr': model.CorrNeigh(args.kernelSize),
    'netFlowCoarse': model.NetFlowCoarse(args.kernelSize),
    'netMatch': model.NetMatchability(args.kernelSize),
}

# Move every sub-network onto the GPU; all tensors will be CUDA floats.
for net in network.values():
    net.cuda()
typeData = torch.cuda.FloatTensor

# loading Network
if args.resumePth:
    param = torch.load(args.resumePth)
    msg = 'Loading pretrained model from {}'.format(args.resumePth)
    print(msg)
Example #3
0
if __name__ == "__main__":
    # YOLOv2 anchor priors: five comma-separated (width, height) pairs.
    ANCHORS = '1.08,1.19,  3.42,4.41,  6.63,11.38,  9.42,5.11,  16.62,10.52'
    # ANCHORS = '1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.00711'
    # Pascal-VOC class labels (20 classes).
    CLASSES = [
        "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
        "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
        "pottedplant", "sheep", "sofa", "train", "tvmonitor"
    ]

    # Parse the anchor string into a (5, 2) float array of (w, h) pairs.
    # BUGFIX: the original comprehension reused ANCHORS as its loop variable,
    # shadowing the string it iterates over; use a distinct name instead.
    ANCHOR_VALUES = np.reshape(
        [float(token.strip()) for token in ANCHORS.split(',')], [5, 2])
    # One random RGB triple per class for drawing detection boxes.
    box_colors = [(int(random.random() * 255), int(random.random() * 255),
                   int(random.random() * 255)) for i in range(20)]

    fe = model.FeatureExtractor()
    net = fe.yolo_convolutional_net()
    net.summary()
    net.load_weights("yolo-voc.1.0.h5")

    cap = cv2.VideoCapture("traffic.mp4")
    while True:
        should_break = False
        tmp, im_data = cap.read()
        im_h, im_w = im_data.shape[:2]
        im_out = im_data.copy()
        im_data = letter(im_data, 416, 416)
        im_data = im_data[:, :, ::-1]
        im_data = im_data.astype(np.float32).reshape((1, 416, 416, 3))
        im_data /= 255.0
        fake_boxes = np.zeros((1, 1, 1, 1, 15, 4))
Example #4
0
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
from tqdm import tqdm

batchSize = 16
# Losses: pixel-wise MSE for content, BCE for the adversarial game.
content_criterion = nn.MSELoss()
# Generator and discriminator are pinned to separate GPUs.
GeneratorDevice = torch.device("cuda:1")
DiscriminatorDevice = torch.device("cuda:0")
adversarial_criterion = nn.BCELoss()

train_dataloader = torch.utils.data.DataLoader(dataloader.train_dataset, batchSize, shuffle=True, num_workers=12)
valid_dataloader = torch.utils.data.DataLoader(dataloader.dev_dataset, 1, shuffle=False)

generator = model.Generator(16, dataloader.SCALING_FACTOR)
discriminator = model.Discriminator()
feature_extractor = model.FeatureExtractor()
writer = SummaryWriter()
# Resume both networks from their final checkpoints.
generator.load_state_dict(torch.load('./checkpoint/generator_final.pth'))
discriminator.load_state_dict(torch.load('./checkpoint/discriminator_final.pth'))

generator = generator.to(GeneratorDevice)
discriminator = discriminator.to(DiscriminatorDevice)
feature_extractor = feature_extractor.to(DiscriminatorDevice)
# Staging buffer for a batch of low-res inputs. torch.empty replaces the
# legacy torch.FloatTensor(sizes) constructor; both yield an uninitialized
# float32 CPU tensor of that shape.
low_res = torch.empty(batchSize, 3, dataloader.LOWRES, dataloader.LOWRES)
# "Real" target labels for the adversarial loss. torch.ones(..., device=...)
# replaces the deprecated torch.tensor(tensor) copy-construct, which warns
# in modern PyTorch; values and device are identical.
ones_const = torch.ones(batchSize, 1, device=DiscriminatorDevice)



def train():
    optim_generator = optim.Adam(generator.parameters(), lr=0.0001)
    count = 0