Example #1
def main():
    # build the network architecture
    builder = ModelBuilder()
    model = builder.build_net(arch=TrainGlobalConfig.id,
                              num_input=TrainGlobalConfig.num_input,
                              num_classes=TrainGlobalConfig.num_classes,
                              num_branches=TrainGlobalConfig.num_branches,
                              padding_list=TrainGlobalConfig.padding_list,
                              dilation_list=TrainGlobalConfig.dilation_list)

    model = model.to(device)

    # restore weights from the last saved checkpoint
    ch = torch.load(
        f"./result/{TrainGlobalConfig.base_dir}/last-checkpoint.bin")
    model.load_state_dict(fix_model_state_dict(ch["model_state_dict"]))

    valid_list = os.path.join(TrainGlobalConfig.root_path, "valid_0.txt")
    valid_dataset = BraTSDataset(
        list_file=valid_list,
        root=TrainGlobalConfig.root_path,
        phase="val",
    )

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=TrainGlobalConfig.num_workers,
        pin_memory=True)
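    # evaluate on the validation split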
    test(valid_loader, model, TrainGlobalConfig)
Example #2
def patch_batch_classification_predict():
    test_ids = [6]

    input_shape = (256, 256, 3)

    # class_mode = "binary"
    class_mode = "categorical"

    model_name = "refinenet"
    # model_weights = "/media/fty/Windows/linux_data/weights/binary/pbic/unet_weights_epoch100.h5"
    model_weights = "/media/fty/Windows/linux_data/weights/categorical/refinement/refinenet_weights_epoch300.h5"

    plot = Vaihingen_class
    # positive classes to detect; everything else counts as background
    active_positive_class = [
        Vaihingen_class.Building,
        Vaihingen_class.Tree,
        Vaihingen_class.Car,
        Vaihingen_class.Low_vegetation,
    ]

    classes = len(active_positive_class) + 1  # +1 for the background class

    test_batch_size = 8
    patch_based_dataset_test = Patch_based_dataset(tiff_path, label_path, plot,
                                                   active_positive_class)

    patch_based_dataset_test.prepare_patch_based_dataset(
        is_train=False,
        load_ids=test_ids,
        batch_size=test_batch_size,
        class_mode=class_mode,
        classes=classes,
        is_augment=False,
        model_input_pixel_size=(input_shape[0], input_shape[1]),
        predict_center_pixel_size=(128, 128),
        evaluated_path=evaluated_path)
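    # build the classifier and load the pre-trained weights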
    model = ModelBuilder(PAI_FLAGS=None,
                         input_shape=input_shape,
                         classes=classes,
                         model_name=model_name,
                         load_weights=model_weights,
                         class_mode=class_mode)

    class_result_pics = model.predict_and_evaluate(
        test_dataset=patch_based_dataset_test,
        steps_per_epoch=int(
            math.ceil(patch_based_dataset_test.get_n_samples() /
                      float(test_batch_size))),
        verbose=1,
        pixel_based_evaluate=True,
        show_class_result_pic=True)

    # show ground truth (left) next to the predicted class map (right)
    for i in class_result_pics:
        plt.subplot(121)
        plt.imshow(patch_based_dataset_test.evaluations[i][:, :, 0])
        plt.subplot(122)
        plt.imshow(class_result_pics[i])
        plt.show()
Example #3
def main():
    # build the network architecture
    builder = ModelBuilder()
    model = builder.build_net(arch=TrainGlobalConfig.id,
                              num_input=TrainGlobalConfig.num_input,
                              num_classes=TrainGlobalConfig.num_classes,
                              num_branches=TrainGlobalConfig.num_branches,
                              padding_list=TrainGlobalConfig.padding_list,
                              dilation_list=TrainGlobalConfig.dilation_list)

    model = model.to(device)

    # RMSprop as configured for this run; cross-entropy is the training objective
    optimizer = optim.RMSprop(model.parameters(),
                              TrainGlobalConfig.lr,
                              alpha=0.9,
                              eps=1e-4,
                              weight_decay=1e-4,
                              momentum=0.6)
    criterion = nn.CrossEntropyLoss()

    train_list = os.path.join(TrainGlobalConfig.root_path, "train_0.txt")
    valid_list = os.path.join(TrainGlobalConfig.root_path, "valid_0.txt")

    train_dataset = BraTSDataset(list_file=train_list,
                                 root=TrainGlobalConfig.root_path,
                                 crop_size=TrainGlobalConfig.crop_size,
                                 num_input=TrainGlobalConfig.num_input)

    valid_dataset = BraTSDataset(list_file=valid_list,
                                 root=TrainGlobalConfig.root_path,
                                 crop_size=TrainGlobalConfig.crop_size,
                                 num_input=TrainGlobalConfig.num_input)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=TrainGlobalConfig.batch_size,
        shuffle=True,
        num_workers=TrainGlobalConfig.num_workers,
        pin_memory=True)

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=TrainGlobalConfig.batch_size,
        shuffle=False,
        num_workers=TrainGlobalConfig.num_workers,
        pin_memory=True)

    trainer = PytorchTrainer(
        model=model,
        optimizer=optimizer,
        criterion=criterion,
        device=device,
        config=TrainGlobalConfig,
    )

    trainer.fit(train_loader, valid_loader)
Example #4
def main(args):
    # torch.cuda.set_device(args.gpu)

    # Network Builders
    builder = ModelBuilder()
    
    # dummy encoder output, referenced by the commented-out GCU variants below
    enc_out = torch.randn(1, 2048, 64, 64)
    net_encoder = builder.build_encoder(
        weights="baseline-resnet50dilated-ppm_deepsup/encoder_epoch_20.pth")
    gcu = GraphConv()
    # gcu = [GCU(X=enc_out, V=2), GCU(X=enc_out, V=4), GCU(X=enc_out, V=8), GCU(X=enc_out, V=32)]

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, gcu, crit, tr=False)

    # print("Prinitng Params", gcu[1].parameters())
    for m in gcu.parameters():
        print("Hello",m.shape,m.name,m)
    print("dddddddddddddddd", len(list(gcu.parameters())))
    for m in gcu.modules():
        print("Prining", m.parameters())
    # Dataset and Loader
    if len(args.test_imgs) == 1 and os.path.isdir(args.test_imgs[0]):
        test_imgs = find_recursive(args.test_imgs[0])
    else:
        test_imgs = args.test_imgs

    list_test = [{'fpath_img': x} for x in test_imgs]

    dataset_test = TestDataset(list_test, args, max_sample=-1)

    loader_test = torchdata.DataLoader(
        dataset_test,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)


    # Main loop
    test(segmentation_module, loader_test, args)

    print('Inference done!')
Example #5
def main():
    num_train_samples = sum([len(files) for r, d, files in os.walk(train_dir)])
    num_valid_samples = sum([len(files) for r, d, files in os.walk(valid_dir)])

    # one step per full batch; floor drops any final partial batch
    num_train_steps = math.floor(num_train_samples / batch_size)
    num_valid_steps = math.floor(num_valid_samples / batch_size)

    model = ModelBuilder().build((img_width, img_height, 3), num_classes)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       horizontal_flip=True,
                                       zoom_range=[1, 1.15],
                                       rotation_range=15,
                                       width_shift_range=0.1,
                                       fill_mode='wrap')

    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True)

    valid_datagen = ImageDataGenerator(rescale=1. / 255)

    valid_generator = valid_datagen.flow_from_directory(
        valid_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True)

    earlyStopping = EarlyStopping(monitor='val_loss',
                                  min_delta=0.1,
                                  patience=10,
                                  verbose=1,
                                  mode='auto')

    csvLogger = CSVLogger(filename=output_dir + '/training.log')

    checkpointer = ModelCheckpoint(output_dir + '/model_checkpoint.h5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit_generator(train_generator,
                        steps_per_epoch=num_train_steps,
                        epochs=num_epochs,
                        validation_data=valid_generator,
                        validation_steps=num_valid_steps,
                        callbacks=[earlyStopping, csvLogger, checkpointer])

    model.save(output_dir + '/model_network.h5')
Example #6
def main(path, is_training, is_predicting, model_weights_file,
         submission_file):
    print('Starting train_statefarm.py')
    print('* using path: {0}'.format(path))
    print('* training: {0}, predicting: {1}'.format(is_training,
                                                    is_predicting))

    batch_size = 64
    data_provider = DataProvider(os.path.join(path, 'partition'), batch_size)
    feature_provider = FeatureProvider(data_provider)
    training_data_provider = TrainingDataProvider(data_provider,
                                                  feature_provider)

    builder = ModelBuilder(training_data_provider,
                           dropout=0.6,
                           batch_size=batch_size)

    if is_training:
        print('Train last layer of dense model with batch normalization.')
        builder.train_last_layer()
        print('Train dense layers of model with batch normalization.')
        builder.train_dense_layers()

    model = builder.build(data_provider)

    if not is_training:
        print('Loading model weights from {0}'.format(model_weights_file))
        model.load_weights(
            data_provider.get_weight_filepath(model_weights_file))
    else:
        model.train()
        print('Writing model weights to {0}'.format(model_weights_file))
        model.save_weights(
            data_provider.get_weight_filepath(model_weights_file))

    if is_predicting:
        print('Writing predictions to {0}'.format(submission_file))
        batch_size = 2
        data_provider = DataProvider(path, batch_size)
        predict_states(model, data_provider, batch_size, submission_file)
Example #7
def super_pixel_classification_predict():

    test_ids = [6]

    input_shape = (64, 64, 3)
    n_segments = 20
    model_name = "resnet"

    model_weights = "/media/fty/Windows/linux_data/weights/categorical/resnet/resnet_super_pixel_weights_epoch450.h5"

    plot = Vaihingen_class
    # positive classes to detect; everything else counts as background
    active_positive_class = [
        Vaihingen_class.Building,
        Vaihingen_class.Tree,
        Vaihingen_class.Car,
        Vaihingen_class.Low_vegetation,
    ]
    classes = len(active_positive_class) + 1  # +1 for the background class

    super_pixel_dataset_test = Super_pixel_seg_dataset(tiff_path, label_path,
                                                       plot,
                                                       active_positive_class)

    test_batch_size = 100
    super_pixel_dataset_test.prepare_superpixel_dataset(
        is_train=False,
        load_ids=test_ids,
        n_segments=n_segments,
        batch_size=test_batch_size,
        is_augment=False,
        model_input_pixel_size=(input_shape[0], input_shape[1]),
        one_hot=False,
        save_segments=True,
        evaluated_path=evaluated_path,
        exclude_boundary_objs=False)

    model = ModelBuilder(PAI_FLAGS=None,
                         input_shape=input_shape,
                         classes=classes,
                         model_name=model_name,
                         load_weights=model_weights)

    class_result_pics = model.predict_and_evaluate(
        test_dataset=super_pixel_dataset_test,
        steps_per_epoch=int(
            math.ceil(super_pixel_dataset_test.get_n_samples() /
                      float(test_batch_size))),
        verbose=1,
        object_based_evaluate=True,
        pixel_based_evaluate=True,
        show_class_result_pic=True)


    # show ground truth (left) next to the predicted class map (right)
    for i in class_result_pics:
        plt.subplot(121)
        plt.imshow(super_pixel_dataset_test.evaluations[i][:, :, 0])
        plt.subplot(122)
        plt.imshow(class_result_pics[i])
        plt.show()
Example #8
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("device :", device)

    # ground truth
    gt_on = args.gt_on  # whether to measure IoU accuracy
    f = open('ground_truth/Non_video4_GT.txt', 'r')  # ground-truth file
    record = args.record  # whether to save IoU results and output images

    # create model
    model = ModelBuilder()

    # load model
    checkpoint = torch.load("pretrained_model/model.pth",
                            map_location=lambda storage, loc: storage.cpu())

    model.load_state_dict(checkpoint)
    model.eval().to(device)

    # build tracker
    tracker = build_tracker(model)

    first_frame = True
    video_name = args.video_name.split('/')[-1].split('.')[0]
    cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN)

    frame_num = 0
    first_time = True
    current_target = -1
    for frame, focal in get_frames(args.video_name, args.type, args.img2d_ref,
                                   args.start_num, args.last_num):
        frame_num += 1
        if first_frame:
            try:
                init_rect = cv2.selectROI(video_name, frame, False, False)
            except Exception:
                exit()
            tracker.init(frame, init_rect)
            first_frame = False
        else:
            max_index = -1
            max_val = 0
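            # first pass scans every focal image; later passes search only a
            # small window around the previous best focal index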
            if first_time:
                outputs = [tracker.track(cv2.imread(f)) for f in focal]

                for i in range(len(outputs)):
                    if outputs[i]['best_score'] >= max_val:
                        max_val = outputs[i]['best_score']
                        max_index = i
                first_time = False
                current_target = max_index
            else:
                outputs = [
                    tracker.track(cv2.imread(focal[i]))
                    for i in range(current_target - 3, current_target + 3)
                ]

                for i in range(len(outputs)):
                    if outputs[i]['best_score'] >= max_val:
                        max_val = outputs[i]['best_score']
                        max_index = i
                # shift the focal window by the offset of the new best index
                if max_index > 3:
                    current_target = current_target + abs(3 - max_index)
                elif max_index < 3:
                    current_target = current_target - abs(3 - max_index)

            ground_truth(outputs[max_index]['bbox'][:2],
                         outputs[max_index]['bbox'][2:])

            bbox = list(map(int, outputs[max_index]['bbox']))

            cv2.rectangle(frame, (bbox[0], bbox[1]),
                          (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 0, 255),
                          3)
            save_path = os.path.join('data/result2',
                                     '{:03d}.jpg'.format(frame_num))
            cv2.imwrite(save_path, frame)

            # ground truth
            if gt_on:
                line = f.readline()
                bbox_label = line.split(',')
                bbox_label = list(map(int, bbox_label))

                iou = IOU(bbox, bbox_label)

                labelx = bbox_label[0] + (bbox_label[2] / 2)
                labely = bbox_label[1] + (bbox_label[3] / 2)

                pre = ((outputs[max_index]['cx'] - labelx)**2 +
                       (outputs[max_index]['cy'] - labely)**2)**0.5

                if record:
                    with open('ground_truth/result_iou.txt', 'a') as result_iou:
                        result_iou.write(str(iou) + ',')
                    with open('ground_truth/result_pre.txt', 'a') as result_pre:
                        result_pre.write(str(pre) + ',')

                cv2.rectangle(frame, (bbox_label[0], bbox_label[1]),
                              (bbox_label[0] + bbox_label[2],
                               bbox_label[1] + bbox_label[3]), (255, 255, 255),
                              3)

            cv2.imshow(video_name, frame)

            if record:
                save_image(frame_num, frame)
            cv2.waitKey(40)
Example #9
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("device :", device)

    # ground truth
    gt_on = args.gt_on  # whether to measure IoU accuracy
    f = open('ground_truth/Non_video4_GT.txt', 'r')  # ground-truth file

    # create model
    model = ModelBuilder()

    # load model
    checkpoint = torch.load("pretrained_model/model.pth",
                            map_location=lambda storage, loc: storage.cpu())

    model.load_state_dict(checkpoint)
    model.eval().to(device)

    # build tracker
    tracker = build_tracker(model)

    first_frame = True
    video_name = args.video_name.split('/')[-1].split('.')[0]
    cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN)

    frame_num = 0
    for frame in get_frames(args.video_name, args.type, args.img2d_ref,
                            args.start_num, args.last_num):
        frame_num += 1
        if first_frame:
            try:
                init_rect = cv2.selectROI(video_name, frame, False, False)
            except Exception:
                exit()
            tracker.init(frame, init_rect)
            first_frame = False
        else:
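            # track the target in the current frame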
            outputs = tracker.track(frame)
            bbox = list(map(int, outputs['bbox']))

            #### ground truth ####
            if gt_on:
                line = f.readline()
                bbox_label = line.split(',')
                bbox_label = list(map(int, bbox_label))

                labelx = bbox_label[0] + (bbox_label[2] / 2)
                labely = bbox_label[1] + (bbox_label[3] / 2)

                iou = IOU(bbox, bbox_label)
                pre = ((outputs['cx'] - labelx)**2 +
                       (outputs['cy'] - labely)**2) ** 0.5

                if args.record:
                    with open('ground_truth/result_iou.txt', 'a') as result_iou:
                        result_iou.write(str(iou) + ',')
                    with open('ground_truth/result_pre.txt', 'a') as result_pre:
                        result_pre.write(str(pre) + ',')

                cv2.rectangle(frame, (bbox_label[0], bbox_label[1]),
                              (bbox_label[0]+bbox_label[2],
                               bbox_label[1]+bbox_label[3]),
                              (255, 255, 255), 3)

            #### ----------------- ####

            cv2.rectangle(frame, (bbox[0], bbox[1]),
                          (bbox[0]+bbox[2], bbox[1]+bbox[3]),
                          (0, 0, 255), 3)
            cv2.imshow(video_name, frame)
            if args.record:
                save_image(frame_num, frame)
            cv2.waitKey(40)
Example #10
def main(args):
    # Network Builders
    builder = ModelBuilder()

    crit = nn.NLLLoss(ignore_index=-1)
    crit = crit.cuda()
    net_encoder = builder.build_encoder(
        weights="baseline-resnet50dilated-ppm_deepsup/encoder_epoch_20.pth")
    gcu = GraphConv(batch=args.batch_size_per_gpu)
    # gcu = [GCU(X=enc_out, V=2), GCU(X=enc_out, V=4), GCU(X=enc_out, V=8), GCU(X=enc_out, V=32)]
    # gcu.load_state_dict(torch.load("ckpt/baseline-resnet50dilated-ngpus1-batchSize1-imgMaxSize1000-paddingConst8-segmDownsampleRate8-epoch20/decoder_epoch_20.pth"))
    segmentation_module = SegmentationModule(net_encoder, gcu, crit, tr=True)

    # Dataset and Loader
    dataset_train = TrainDataset(args.list_train,
                                 args,
                                 batch_per_gpu=args.batch_size_per_gpu)

    loader_train = torchdata.DataLoader(
        dataset_train,
        batch_size=len(args.gpus),  # we have modified data_parallel
        shuffle=False,  # we do not use this param
        collate_fn=user_scattered_collate,
        num_workers=int(args.workers),
        drop_last=True,
        pin_memory=True)

    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # create loader iterator
    iterator_train = iter(loader_train)

    # load nets into gpu
    if len(args.gpus) > 4:
        segmentation_module = UserScatteredDataParallel(segmentation_module,
                                                        device_ids=args.gpus)
        # For sync bn
        patch_replication_callback(segmentation_module)

    # segmentation_module.cuda()

    # Set up optimizers
    # print(gcu[0].parameters())
    nets = (net_encoder, gcu, crit)
    optimizers, par = create_optimizers(nets, args)

    # Main loop
    history = {'train': {'epoch': [], 'loss': [], 'acc': []}}
    vis = visdom.Visdom()
    win = vis.line(np.array([5.7]),
                   opts=dict(xlabel='epochs',
                             ylabel='Loss',
                             title='Training Loss V=16',
                             legend=['Loss']))

    for epoch in range(args.start_epoch, args.num_epoch + 1):
        lss = train(segmentation_module, iterator_train, optimizers, history,
                    epoch, par, vis, win, args)

        # checkpointing
        checkpoint(nets, history, args, epoch)

    print('Training Done!')
Example #11
        contextSentenceLengths = [
            sentenceLengths + [1] * (maxContextLen - len(sentenceLengths))
            for sentenceLengths in contextSentenceLengths
        ]  # pad with 1 instead of 0 so sequence-end selectors don't fail with a buffer underrun
        questionInput = [
            question + [0] * (maxQuestionLen - len(question))
            for context, question, answer in samples
        ]
        answerInput = [answer for context, question, answer in samples]
        yield contextInput, contextLengths, contextSentenceLengths, questionInput, questionLengths, answerInput


# build the whole model and run it
# modelBuilder = ModelBuilder(batch_size, question_dim, obj_dim, dictSize)
modelBuilder = ModelBuilder(batch_size, macro_batch_size, question_dim,
                            obj_dim, dictSize, args.questionAwareContext,
                            args.f_layers, args.f_inner_layers, args.g_layers,
                            args.h_layers, args.appendPosVec)

(inputContext, inputContextLengths, inputContextSentenceLengths, inputQuestion,
 inputQuestionLengths, objects,
 question) = modelBuilder.buildWordProcessorLSTMs()

if modelToUse == 1:
    print("Using model I")
    rnOutput = modelBuilder.buildRN_I(objects, question)
elif modelToUse == 2:
    print("Using model II")
    rnOutput = modelBuilder.buildRN_II(objects, question)
elif modelToUse == 3:
    print("Using model III")
    rnOutput = modelBuilder.buildRN_III(objects, question)
Example #12
    active_positive_class = [
        Vaihingen_class.Building,
        Vaihingen_class.Tree,
        Vaihingen_class.Car,
        Vaihingen_class.Low_vegetation,
    ]
    classes = len(active_positive_class) + 1  # +1 for the background class
    
    patch_based_dataset_training = Patch_based_dataset(tiff_path,
                                                       label_path,
                                                       plot,
                                                       active_positive_class)
    model = ModelBuilder(PAI_FLAGS=FLAGS,
                         input_shape=input_shape,
                         classes=classes,
                         model_name=model_name,
                         model_alias_name=model_alias_name,
                         load_weights=load_weights_path,
                         class_mode=class_mode,
                         upSampling2D_Bilinear=upSampling2D_Bilinear,
                         chained_res_pool_improved=chained_res_pool_improved)
    
#     from keras.utils.vis_utils import plot_model
#     plot_model(model.model, to_file="1.png", show_shapes=True)
    
    patch_based_dataset_training.prepare_patch_based_dataset(is_train=True,
                                                             load_ids=train_ids,
                                                             batch_size=batch_size,
                                                             class_mode=class_mode,
                                                             classes=classes,
                                                             is_augment=True, 
                                                             rotate_clip=True, 
Example #13
    df = Reader.read_csv("data",
                         "NSE_Abbott India Limited.csv",
                         config="default",
                         streamType="csv",
                         columns="")
    shaper = DataSet.shape_data_frame(df,
                                      '',
                                      x_columns='1:9',
                                      y_columns='3',
                                      x_dimention='2',
                                      y_dimention='2',
                                      y_offset=1,
                                      test_data_size=20)
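    # scale all features into [-1, 1] before training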
    normalizer = Utils.get_preprocessing_scaler(min_max_tuple=(-1, 1))
    shaper = Utils.fit_transform(shaper, normalizer)

    # train/test feature and target shapes after reshaping
    print(shaper[0].shape)
    print(shaper[2].shape)

    print(shaper[1].shape)
    print(shaper[3].shape)

    model_def = ModelBuilder.create_model('Gradient Boosting Regressor',
                                          shape='2,2',
                                          config='{"random_state":0}')
    model = ModelBuilder.train_model(model_def, shaper, 'true')
    result = ModelBuilder.predict_model(model, shaper)
    result = Utils.inverse_transform(result, normalizer)
    print(result)
Example #14
tokenizer = None
label_encoder = None
encoded_labels = None
inputdata = None
test_df = None
text_X = None
test_Y = None
shaper = None
model = None


from reader import Reader
from dataloader import Utils
from dataloader import DataSet
from model import ModelBuilder
from writer import Writer

if __name__ == "__main__" :

  df = Reader.read_csv("data","sentiment_train.csv",config="default",streamType="csv",columns="0,1",filter="full",count=5)
  tokenizer =  Utils.get_text_tokenizer(df,1)
  label_encoder = Utils.get_label_encoder(df,0)
  encoded_labels = DataSet.get_encodered_labels(df,0,label_encoder)
  inputdata = DataSet.text_to_matrix(df,1,tokenizer)
  test_df = Reader.read_csv("data","sentiment_test.csv",config="default",streamType="df",columns="0,1",filter="full",count=5)
  text_X = DataSet.text_to_matrix(test_df,1,tokenizer)
  test_Y = DataSet.get_encodered_labels(test_df,0,label_encoder)
  shaper = tuple([inputdata, encoded_labels, text_X, test_Y])
  model = ModelBuilder.train_model((ModelBuilder.create_model('KNN Classifier',shape='2,2',config='{"n_neighbors":1,"algorithm":"ball_tree","weights":"distance"}')),shaper,'true')
  Writer.write_csv((DataSet.get_label((ModelBuilder.predict_model(model,text_X)),0,label_encoder)),"default")
                      config="default",
                      streamType="csv",
                      columns="0,1",
                      filter="full",
                      count=5,
                      header="1",
                      transformers=None)
 tokenizer = Utils.get_text_tokenizer(df, 1)
 label_encoder = Utils.get_label_encoder(df, 0)
 encoded_labels = DataSet.get_encodered_labels(df, 0, label_encoder)
 inputdata = DataSet.text_to_matrix(df, 1, tokenizer)
 modeldef = ModelBuilder.create_model(
     'Keras Sequential Model',
     shape='2,2',
     config=
     ('{"loss_function":"categorical_crossentropy","optimizer":"adam"}', [
         '{ "layer_type":"Dense" ,"activation":"relu","optimizer":"Adam","threshold":"512","input_shape":"10000,"}',
         '{ "layer_type":"Dropout" ,"activation":"relu","optimizer":"Adam","threshold":".5","input_shape":""}',
         '{ "layer_type":"Dense" ,"activation":"softmax","optimizer":"Adam","threshold":"4","input_shape":""}'
     ]))
 test_df = Reader.read_csv("data",
                           "sentiment_test.csv",
                           config="default",
                           streamType="df",
                           columns="0,1",
                           filter="full",
                           count=5,
                           header="1",
                           transformers=None)
 text_X = DataSet.text_to_matrix(test_df, 1, tokenizer)
 test_Y = DataSet.get_encodered_labels(test_df, 0, label_encoder)
Example #16
modeldef = None
test_df = None
text_X = None
test_Y = None
shaper = None
model = None
result = None


from reader import Reader
from dataloader import Utils
from dataloader import DataSet
from model import ModelBuilder

if __name__ == "__main__" :

  df = Reader.read_csv("data","sentiment_train.csv",config="default",streamType="csv",columns="0,1",filter="full",count=5,header="1",transformers=None)
  tokenizer =  Utils.get_text_tokenizer(df,1)
  label_encoder = Utils.get_label_encoder(df,0)
  encoded_labels = DataSet.get_encodered_labels(df,0,label_encoder)
  inputdata = DataSet.text_to_matrix(df,1,tokenizer)
  modeldef = ModelBuilder.create_model('KNN Classifier',shape='2,2',config='{"n_neighbors":1,"algorithm":"ball_tree","weights":"distance"}')
  test_df = Reader.read_csv("data","sentiment_test.csv",config="default",streamType="df",columns="0,1",filter="full",count=5,header="1",transformers=None)
  text_X = DataSet.text_to_matrix(test_df,1,tokenizer)
  test_Y = DataSet.get_encodered_labels(test_df,0,label_encoder)
  shaper = tuple([inputdata, encoded_labels, text_X, test_Y])
  model = ModelBuilder.train_model(modeldef,shaper,'true')
  result = ModelBuilder.predict_model(model,text_X)
  result = DataSet.get_label(result,0,label_encoder)
  print(result)
Example #17
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("device :", device)

    # ground truth
    f = open('ground_truth/new_record.txt', 'r')

    # create model
    model = ModelBuilder()

    # load model
    checkpoint = torch.load("pretrained_model/model.pth",
                            map_location=lambda storage, loc: storage.cpu())

    model.load_state_dict(checkpoint)
    model.eval().to(device)

    # build tracker
    tracker = build_tracker(model)

    first_frame = True
    root = "test"
    video_name = root.split('/')[-1].split('.')[0]
    cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN)

    a = 0  # frame counter
    first_time = True
    current_target = -1
    for frame, focal in get_frames(root):
        a += 1
        if first_frame:
            try:
                init_rect = cv2.selectROI(video_name, frame, False, False)
            except Exception:
                exit()
            tracker.init(frame, init_rect)
            first_frame = False
        else:
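            # pick the focal image via the tracker's classification score,
            # then track the target within it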
            ''' full-range method: scan every focal image '''
            max_index = tracker.get_cls(focal)
            current_target = max_index
            ''' windowed method: search only around the previous target '''
            # if first_time:
            #     max_index = tracker.get_cls(focal)
            #     current_target = max_index
            #     first_time = False
            # else:
            #     max_index = tracker.get_cls(
            #         focal[current_target-3:current_target+3])
            #     if max_index > 3:
            #         current_target = current_target + abs(3 - max_index)
            #     elif max_index < 3:
            #         current_target = current_target - abs(3 - max_index)

            print("Focal Image Index: ", current_target)

            output = tracker.track(cv2.imread(focal[current_target]))

            bbox = list(map(int, output['bbox']))

            # ground truth
            line = f.readline()
            bbox_label = line.split(',')
            bbox_label = list(map(int, bbox_label))
            left_top_label = (bbox_label[0], bbox_label[1])
            right_bottom_label = (bbox_label[0] + bbox_label[2],
                                  bbox_label[1] + bbox_label[3])

            left_top = (bbox[0], bbox[1])
            right_bottom = (bbox[0] + bbox[2], bbox[1] + bbox[3])

            center = ((left_top[0] + right_bottom[0]) / 2,
                      (left_top[1] + right_bottom[1]) / 2)
            center_label = ((left_top_label[0] + right_bottom_label[0]) / 2,
                            (left_top_label[1] + right_bottom_label[1]) / 2)

            distance = ((center[0] - center_label[0])**2 +
                        (center[1] - center_label[1])**2)**0.5

            result_cls = open('ground_truth/result_cls.txt', 'a')
            result_cls.write(str(distance) + ',')
            result_cls.close()

            cv2.rectangle(frame, left_top, right_bottom, (0, 255, 0), 3)
            cv2.putText(frame, str(current_target + start_num), (30, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
            cv2.putText(frame, str(distance), (30, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255))
            cv2.imshow(video_name, frame)
            ''' save the output image '''
            save_path = os.path.join('data/result', '{:03d}.jpg'.format(a))
            cv2.imwrite(save_path, frame)
            cv2.waitKey(40)
Example #18
 df = Reader.read_csv("data",
                      "NSE_Abbott India Limited.csv",
                      config="default",
                      streamType="csv",
                      columns="",
                      filter="full",
                      count=5,
                      header="1",
                      transformers=None)
 shaper = DataSet.shape_data_frame(df,
                                   '',
                                   x_columns='1:9',
                                   y_columns='3',
                                   x_dimention='3',
                                   y_dimention='1',
                                   y_offset=1,
                                   test_data_size=20)
 normalizer = Utils.get_preprocessing_scaler(min_max_tuple=(-1, 1))
 shaper = Utils.fit_transform(shaper, normalizer)
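 # per the config: an LSTM (100 units) with dropout, then one linear output,
 # trained with mean absolute error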
 model_def = ModelBuilder.create_model(
     'Keras Sequential Model',
     shape='2,2',
     config=('{"loss_function":"mean_absolute_error","optimizer":"adam"}', [
         '{ "layer_type":"LSTM" ,"activation":"tanh","optimizer":"Adam","threshold":"100","input_shape":"1,8"}',
         '{ "layer_type":"Dropout" ,"activation":"sigmoid","optimizer":"sgd","threshold":"0.2","input_shape":""}',
         '{ "layer_type":"Dense" ,"activation":"linear","optimizer":"Adam","threshold":"1","input_shape":""}'
     ]))
 model = ModelBuilder.train_model(model_def, shaper, 'true')
 result = ModelBuilder.predict_model(model, shaper)
 result = Utils.inverse_transform(result, normalizer, axis='y')
 print(result)
Example #19
        contextSentenceLengths = [[len(sentence) for sentence in context]
                                  for context, question, answer in samples]
        maxContextSentenceLen = max(
            max(lengths) for lengths in contextSentenceLengths)
        questionLengths = [len(question) for context, question, answer in samples]
        maxQuestionLen = max(questionLengths)
        # build tensors from the data and apply padding
        emptySentence = [0] * maxContextSentenceLen  # empty sentence for batch context padding
        contextInput = sum(
            [[sentence + [0] * (maxContextSentenceLen - len(sentence))
              for sentence in context]
             for context, question, answer in samples], [])  # concatenated
        # contextInput = [[sentence + [0]*(maxContextSentenceLen - len(sentence)) for sentence in context] + [emptySentence]*(maxContextLen - len(context)) for context, question, answer in samples]
        contextSentenceLengths = sum(contextSentenceLengths, [])  # concatenated
        # contextSentenceLengths = [sentenceLengths + [1]*(maxContextLen - len(sentenceLengths)) for sentenceLengths in contextSentenceLengths]  # pad with 1 instead of 0 so sequence-end selectors don't fail with a buffer underrun
        questionInput = [
            question + [0] * (maxQuestionLen - len(question))
            for context, question, answer in samples
        ]
        answerInput = [answer for context, question, answer in samples]
        yield (contextInput, contextLengths, contextSentenceLengths,
               questionInput, questionLengths, answerInput)

# build the whole model and run it
modelBuilder = ModelBuilder(batch_size, macro_batch_size, question_dim,
                            obj_dim, dictSize, args.questionAwareContext,
                            args.f_layers, args.f_inner_layers, args.g_layers,
                            args.h_layers, args.appendPosVec, args.batchNorm,
                            args.layerNorm, args.weightPenalty)

(inputContext, inputContextLengths, inputContextSentenceLengths, inputQuestion,
 inputQuestionLengths, objects,
 question) = modelBuilder.buildWordProcessorLSTMs()

if modelToUse == 1:
    print("Using model I")
    (rnOutput, isTraining) = modelBuilder.buildRN_I(objects, question)
elif modelToUse == 2:
    print("Using model II")
    (rnOutput, isTraining) = modelBuilder.buildRN_II(objects, question)
elif modelToUse == 3:
    print("Using model III")
    (rnOutput, isTraining) = modelBuilder.buildRN_III(objects, question)
elif modelToUse == 4:
    print("Using model IV")
    (rnOutput, isTraining) = modelBuilder.buildRN_IV(objects, question)
Example #20
def builder():
    if sys.argv[2] == '-h' or sys.argv[2] == '-help' or sys.argv[2] == '--help':
        builderHelp()
        return

    cwd = os.getcwd()
    cwdcat = cwd.partition('model')
    os.chdir(f'{cwdcat[0]}/model/')

    if len(sys.argv) < 5:
        logger.error(
            'Please follow format of modelBuilder.py [datasheet] -s [save_path] -k [k-neighbors] -t [Time] -h [hash] -d [DEBUGGING]'
        )
        sys.exit()

    if not os.path.isdir('dataset'):
        os.mkdir('dataset')

    if not os.path.isfile(sys.argv[2]):
        # fetch and download it
        path = sys.argv[2].split('/')
        filename = path[len(path) - 1].split('.')[0]
        datasetConfig = ModelUtils.fetchDatasetConfig()
        r = requests.get(datasetConfig['url'], allow_redirects=True)
        with open(f'./dataset/{filename}.{datasetConfig["type"]}', 'wb') as f:
            f.write(r.content)
        logger.debug('Successfully downloaded data')

    # make sure the model output directory tree exists
    for d in ('build', 'bin', 'bin/currentModels', 'bin/currentModels/fasttext',
              'bin/currentModels/knn', 'bin/oldModels', 'bin/oldModels/fasttext',
              'bin/oldModels/knn', 'bin/newModels', 'bin/newModels/fasttext',
              'bin/newModels/knn', 'bin/newModels/injectModels',
              'bin/newModels/injectModels/fasttext',
              'bin/newModels/injectModels/knn'):
        os.makedirs(d, exist_ok=True)

    k: int = 10
    savePath: str = ''
    hash: str = ''
    d: bool = False
    time: int = 5400

    # walk argv and pick up the optional flags
    for index, item in enumerate(sys.argv, 0):
        if item == '-s' and index + 1 < len(sys.argv):
            savePath = f'{sys.argv[index + 1]}'
        if item == '-h' and index + 1 < len(sys.argv):
            hash = f'{sys.argv[index + 1]}'
        if item == '-k' and index + 1 < len(sys.argv):
            k = int(sys.argv[index + 1])
        if item == '-d':
            d = True
        if item == '-t' and index + 1 < len(sys.argv):
            time = int(sys.argv[index + 1])

    try:
        ModelBuilder.cleanFiles(hash)
        ModelBuilder.createModels(filePath=sys.argv[2],
                                  savePath=savePath,
                                  k=k,
                                  hash=hash,
                                  debug=d,
                                  time=time)
    except ModelException as e:
        logger.critical(str(e))
        print('Please check -h for help.')
    except Exception as e:
        logger.critical('Stack: %s', e)
        print('Please check -h for help.')
    finally:
        ModelBuilder.cleanFiles(hash)
        os.chdir(cwd)
Example #21
 active_positive_class.append(Vaihingen_class.Tree)
 active_positive_class.append(Vaihingen_class.Car)
 active_positive_class.append(Vaihingen_class.Low_vegetation)
 classes = len(active_positive_class) + 1

 super_pixel_dataset_training = Super_pixel_seg_dataset(tiff_path,
                                                        label_path,
                                                        plot,
                                                        active_positive_class)
 
 """You also can set parameters for different model."""
 model = ModelBuilder(PAI_FLAGS=FLAGS,
                      input_shape=input_shape,
                      classes=classes,
                      model_name=model_name,
                      model_alias_name=model_alias_name,
                      load_weights=load_weights_path,
                      original_resnet=original_resnet)
 
 super_pixel_dataset_training.prepare_superpixel_dataset(is_train=True,
                                                         load_ids=train_ids,
                                                         n_segments=n_segments,
                                                         batch_size=batch_size,
                                                         is_augment=True, 
                                                         rotate_clip=True, 
                                                         random_histogram_eq=0.2, 
                                                         random_brightness=(0.5, 2.0), 
                                                         random_intensity=0.2,
                                                         random_flip=0.75,
                                                         model_input_pixel_size=(input_shape[0], input_shape[1]),