Esempio n. 1
0
def get_line(data, lr=0.0001):
    """Fit a one-dimensional linear model to (x, y) pairs.

    :param data: iterable of (x, y) pairs
    :param lr: learning rate used by gradient descent
    :return: (weight, bias, losses) where ``losses`` is the loss recorded
        at each of the 50 training steps
    """
    xs = np.array([point[0] for point in data]).reshape(-1, 1)
    ys = np.array([point[1] for point in data]).reshape(-1, 1)

    model = Model([Dense(1, 1)], MSE())

    # One forward/backward pass per step; backward() returns the step loss.
    loss_history = [
        model.backward(model.forward(xs), ys, lr) for _ in range(50)
    ]

    dense = model.layers[0]
    return dense.W[0, 0], dense.b[0, 0], loss_history
Esempio n. 2
0
def main(argv):
    """
    When the model has been built, load data in real time to predict the
    state at the moment.

    :param argv:
    argv[0]: client ID
    argv[1]: connect_port_name

    :return: None
    """
    _ID = argv[0]
    _PORT_NAME = argv[1]

    model = Model.read_from_file(_ID)
    p_model = PresentationModel.apply(model)

    print('>> Start to receive data...')

    # open serial port and discard the first few (possibly partial) lines
    ser = serial.Serial(_PORT_NAME, 9600)
    for _ in range(20):
        ser.readline()

    while True:
        try:
            # retrieve the line
            line = ser.readline().decode()
            data = [float(val) for val in line.split(',')]

            # no missing column in the data
            if len(data) == 3:
                # calculate mean gap
                p_model.add(data)

                # is "gap" in K-envelope?
                state = p_model.predict()
                print("OK" if state == 0 else "warning !!!")

                # put result into the target file; ``with`` guarantees the
                # handle is closed even if the write fails
                with open(p_model.TARGET_FILE, 'w') as fp:
                    fp.write(str(state))

        except KeyboardInterrupt:
            print('>> exiting !')
            break
        except (IOError, ValueError):
            # IOError: transient serial/file failure.
            # ValueError: a corrupted line that does not parse as floats
            # (previously this crashed the loop) -- skip and read the next.
            continue
Esempio n. 3
0
def main(argv):
    """
    When the model has been built, load data in real time to predict the
    state at the moment.

    :param argv:
    argv[0]: client_ID
    argv[1]: connect_port_name
    :return: None
    """
    _ID = argv[0]
    _PORT_NAME = argv[1]

    model = Model.read_from_file(_ID)
    p_model = PresentationModel.apply(model)

    print('>> Start to receive data...')

    # open serial port and discard the first few (possibly partial) lines
    ser = serial.Serial(_PORT_NAME, 9600)
    for _ in range(20):
        ser.readline()

    while True:
        try:
            line = ser.readline()
            data = [float(val) for val in line.decode().split(',')]
            # only process complete rows (3 columns expected)
            if len(data) == 3:
                p_model.add_to_buffer(data)

                prediction = p_model.predict()
                p_model.add_to_pool(prediction)

                print(p_model.mean_buffer)
                print('%f => res:%d' % (p_model.now_mean, prediction))

                # write the aggregated result; ``with`` guarantees the
                # handle is closed even if the write fails
                with open(p_model.TARGET_FILE, 'w') as fp:
                    fp.write(str(p_model.take_result()))

        except KeyboardInterrupt:
            break
        except ValueError:
            # corrupted serial line that does not parse as floats --
            # previously this crashed the loop; skip and read the next one
            continue
    # close serial
    ser.flush()
    ser.close()
Esempio n. 4
0
def main(argv):
    """
    When the model has been built, load data from a CSV file to predict the
    state at each moment.

    :param argv:
    argv[0]: client_ID
    argv[1]: file_name (with .csv)
    :return: None
    """
    _ID = argv[0]
    _FILE_NAME = argv[1]

    model = Model.read_from_file(_ID)
    p_model = PresentationModel.apply(model)

    print('>> Start to receive data...')

    # BUG FIX: the original rebound ``fp`` (the input file handle) to the
    # target file inside the loop, so the input file was never closed and
    # the final close() closed the wrong handle.  Distinct names plus
    # ``with`` guarantee both files are always closed.
    with open(_FILE_NAME, 'r') as data_file:
        for line in data_file:
            try:
                # first column is an index/timestamp -- skip it
                data = [float(val) for val in line.split(',')[1:]]
                if len(data) == 3:
                    p_model.add_to_buffer(data)

                    prediction = p_model.predict()
                    p_model.add_to_pool(prediction)

                    print(p_model.mean_buffer)
                    print('%f => res:%d' % (p_model.now_mean, prediction))

                    # put result into the target file
                    with open(p_model.TARGET_FILE, 'w') as target_file:
                        target_file.write(str(p_model.take_result()))

            except KeyboardInterrupt:
                break
    ts.MEAN,
    ts.STD,
    ts.IMAGE_HEIGHT,
    ts.IMAGE_WIDTH,
    random_hor_flipping=ts.HORIZONTAL_FLIPPING,
    random_ver_flipping=ts.VERTICAL_FLIPPING,
    random_90x_rotation=ts.ROTATION_90X,
    random_rotation=ts.ROTATION,
    random_color_jittering=ts.COLOR_JITTERING,
    use_coordinates=ts.USE_COORDINATES)

# Batched loader over the test set; shuffling is disabled so evaluation is
# reproducible, and pin_memory (set above) speeds up host->GPU transfers.
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=opt.batchsize,
                                          shuffle=False,
                                          num_workers=opt.nworkers,
                                          pin_memory=pin_memory,
                                          collate_fn=test_align_collate)

# Define Model
# NOTE(review): ``ts`` (training settings) and ``opt`` (CLI options) come
# from elsewhere in this file; the flags enable instance segmentation and
# coordinate features -- confirm against the Model constructor.
model = Model(opt.dataset, ts.N_CLASSES, ts.MAX_N_OBJECTS,
              use_instance_segmentation=ts.USE_INSTANCE_SEGMENTATION,
              use_coords=ts.USE_COORDINATES, load_model_path=opt.model,
              usegpu=opt.usegpu)

# Train Model
# Positional hyper-parameters mirror Model.fit's signature: loss/criterion
# settings, optimizer schedule, then the data loaders and output paths.
model.fit(ts.CRITERION, ts.DELTA_VAR, ts.DELTA_DIST, ts.NORM, ts.LEARNING_RATE,
          ts.WEIGHT_DECAY, ts.CLIP_GRAD_NORM, ts.LR_DROP_FACTOR,
          ts.LR_DROP_PATIENCE, ts.OPTIMIZE_BG, ts.OPTIMIZER, ts.TRAIN_CNN,
          opt.nepochs, ts.CLASS_WEIGHTS, train_loader, test_loader,
          model_save_path, opt.debug)
Esempio n. 6
0
from lib import Model, Engine
import datetime
import plotly.offline as py
import plotly.graph_objs as go

################# SETUP ##################
m = Model("lacoperon")
e = Engine()

################ SET PARAMS #################
# Integration time step, end time, and a sample rate chosen so roughly
# 1000 points are kept over the whole run.
dt = 0.0000025
t1 = 80
smplrt = int((t1 / dt) / 1000)

# NOTE(review): this first parameter set is immediately overwritten by the
# fitted set below and is effectively dead -- presumably kept as a record
# of the hand-tuned starting values.
params = m.pArr(1, 0.23, 15, 50, 0.001, 960, 2.4, 0.0000003, 12, 0.0000003,
                4800, 0.5, 0.01, 30, 0.12, 0.1, 60000, 0.92, 0.462, 0.462, 0.2,
                0.2, 0.2, 0.2, 0.2, 25000)

# Fitted parameter values -- these are the ones actually used by the run.
params = m.pArr(0.154977514043, 1.16462169406, 7.03433844184, 154.746207648,
                0.00019537219059, 7118.81963368, 24.6690045197,
                2.38337847508e-07, 4.11519429287, 1.95971542799e-07,
                47383.95692, 0.387249488649, 0.0220698650347, 22.6682218121,
                0.00422368688352, 0.958937842327, 13966.6093417, 0.84513174251,
                6.1302738193, 3.82796984736, 0.0811790521043, 0.0740019990288,
                0.0443714858756, 0.042983581708, 6.00254638463, 37610.8755213)
# params = m.pArr(2.87291065265, 0.209400206678, 7.51636699019, 1180.76712299, 0.000289911945754, 189.53551479, 8.23132491843, 4.27639769966e-07, 2.22698494574, 6.85695326561e-06, 825.049766525, 0.567406272303, 0.0975143681116, 11.7119727273, 0.0396695147201, 1.51547287798, 40385.761299, 17.4839005396, 5.29433419242, 0.955242980096, 0.0406939846717, 0.740798430267, 0.654389301377, 0.404157205932, 0.132430369897, 38981.1412208)
# Initial species counts/concentrations for the simulation.
initialVals = m.iArr(0, 0, 0, 1, 0, 0, 0, 0, 0)

# uncomment below to use model defaults
# params = None
# initialVals = None
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from lib import Model
from lib import FaissIndex
from lib import Extractor
import numpy as np
import os

# Specify path to images in the repo
base_path = os.getcwd() + "/app/frontend/build/assets/semsearch/datasets/"
fashion_images_dir = base_path + "fashion200/"
iconic_images_dir = base_path + "iconic200/"
# Backbone used to embed images into feature vectors for similarity search.
efficientnet_model = Model(model_name="efficientnetb0")
extractor = Extractor()


def create_index(image_dir, index_save_dir):
    """Create an FAISS index and save to disc

    Args:
        image_dir (str): path to directory of images whose features are added to the index
        index_save_dir (str): directory to save index on disc.     

    Returns:
        lib.FaissIndex:  lib.FaissIndex object.
    """
    features, ids = extractor.extract_from_dir(image_dir, efficientnet_model)
    index = FaissIndex(features.shape[1])
    os.makedirs(output_path)
except:
    pass

# Every image in the input folder matching the expected suffix.
image_paths = glob.glob(os.path.join(image_folder, '*' + image_prefix))

# Make the model directory importable before pulling in its libraries.
model_dir = os.path.dirname(model_path)
sys.path.insert(0, model_dir)

from lib import Model, Prediction
from settings import ModelSettings

ms = ModelSettings()

# Fixed input shape of (1, 3, 256, 256) = (batch, channels, height, width).
model = Model(ms.LABELS, (1, 3, 256, 256),
              load_model_path=model_path,
              usegpu=opt.usegpu)
prediction = Prediction(ms.IMAGE_SIZE_HEIGHT, ms.IMAGE_SIZE_WIDTH, ms.MEAN,
                        ms.STD, model)

for image_path in image_paths:
    # n_samples=10 runs multiple stochastic passes; ``var`` is presumably the
    # per-pixel variance across them (uncertainty) -- TODO confirm in Prediction.
    image, pred, var, v = prediction.predict(image_path, n_samples=10)
    image_name = os.path.splitext(os.path.basename(image_path))[0]

    # Scale the prediction to 0-255 and colorize it blue (low) -> red (high).
    vis_pred_array = np.array(pred)
    vis_pred_array = vis_pred_array * (255.0 / np.max(vis_pred_array))
    vis_pred = Image.fromarray(vis_pred_array.astype('uint8'))
    vis_pred = ImageOps.colorize(vis_pred, (0, 0, 255), (255, 0, 0))

    # Same 0-255 scaling for the variance (uncertainty) map.
    vis_var_array = np.array(var)
    vis_var_array = vis_var_array * (255.0 / np.max(vis_var_array))
Esempio n. 9
0
model_dir = os.path.dirname(args.model)

# Load Seeds -- seed every RNG so evaluation runs are reproducible.
random.seed(s.SEED)
np.random.seed(s.SEED)
torch.manual_seed(s.SEED)

# Load Data
data = Data(data_file=args.data,
            input_horizon=s.INPUT_HORIZON,
            n_stations=args.n_stations,
            train_ratio=s.TRAIN_RATIO,
            val_ratio=s.VAL_RATIO,
            debug=False)

# Load Model
model = Model(args.n_stations,
              s.MOVING_HORIZON,
              s.ACTIVATION,
              s.CRITERION,
              load_model_path=args.model,
              usegpu=args.usegpu)

# Evaluate the first RNN on the held-out test split.
_, _, [X_test, y_test] = data.load_data_lstm_1()

# FIX: use print() (valid in both Python 2 and 3) instead of the
# Python-2-only print statement that was here before.
print('\n\n' + '#' * 10 + ' TESTING ' + '#' * 10)
prediction_test = model.test([X_test, y_test])
draw_graph_all_stations(model_dir, data, args.n_stations, y_test,
                        prediction_test)
Esempio n. 10
0
    device = torch.device("cuda")

    # Directory where PPO checkpoints/samples are written.
    save_path = os.path.join("saves", "ppo-samples")
    os.makedirs(save_path, exist_ok = True)

    # Separate environments for training and evaluation.
    env = gym.make(ENV_ID)
    test_env = gym.make(ENV_ID)

    env.init_dart()
    env.init_sim()
    test_env.init_sim()

    #env.start_render()
    #print(ENV_ID)
    #input()
    # Actor maps observations to actions; critic estimates the state value.
    net_act = Model.ModelActor(env.observation_space.shape[0], env.action_space.shape[0]).to(device)
    net_crt = Model.ModelCritic(env.observation_space.shape[0]).to(device)
    print(net_act)
    print(net_crt)

    writer = SummaryWriter(comment="-ppo_Sample3d")
    agent = Model.AgentA2C(net_act, device = device)
    # One-step experience source feeding the PPO training loop.
    exp_source = ptan.experience.ExperienceSource(env,agent,steps_count = 1, steps_delta = 1)

    # Separate optimizers so actor and critic can use different learning rates.
    opt_act = optim.Adam(net_act.parameters(), lr = LEARNING_RATE_ACTOR)
    opt_crt = optim.Adam(net_crt.parameters(), lr=LEARNING_RATE_CRITIC)

    trajectory = []
    best_reward = None

    what = 0
Esempio n. 11
0
from lib import Model
from lib.nodes import Dense, Tanh
from lib.losses import MSE

# XOR truth table: inputs and expected outputs.
X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])

y = np.array([0, 1, 1, 0]).reshape(-1, 1)

lr = 0.1

# Train 20 independently-initialized 2-4-1 networks for 1000 steps each and
# record the final absolute error of every run, to see how often the
# network actually learns XOR.
errs = []
for _ in range(20):
    net = Model([Dense(2, 4), Tanh(), Dense(4, 1)], MSE())
    for _ in range(1000):
        net.backward(net.forward(X), y, lr)

    errs.append(np.abs(y - net.forward(X)).sum())


# Histogram of final errors across the 20 runs.
plt.hist(errs)
plt.show()


Esempio n. 12
0
def main():
    """Estimate 3D bounding boxes for every image in the validation folder.

    Loads the newest trained weights, runs YOLO 2D detection on each
    image, regresses orientation/confidence/dimensions per detection and
    plots the resulting 3D boxes with OpenCV.  Press SPACE to advance to
    the next image, any other key to exit.
    """
    bins_no = 2

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Pick the newest .pkl checkpoint (sorted() => last element is newest name).
    weights_path = os.path.abspath(os.path.dirname(__file__)) + '/weights'
    weight_list = [x for x in sorted(os.listdir(weights_path)) if x.endswith('.pkl')]
    if len(weight_list) == 0:
        print('We could not find any model weight to load, please train the model first!')
        exit()
    else:
        print('Using model weights : %s'%weight_list[-1])
        my_vgg = models.vgg19_bn(pretrained=True)
        model = Model.Model(features=my_vgg.features, bins=bins_no).to(device)
        if use_cuda:
            checkpoint = torch.load(weights_path + '/%s'%weight_list[-1])
        else:
            # map_location lets CUDA-trained weights load on a CPU-only host
            checkpoint = torch.load(weights_path + '/%s'%weight_list[-1],map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])
        model.eval()

    # Load Yolo
    yolo_path = os.path.abspath(os.path.dirname(__file__)) + '/weights'
    yolo = cv_Yolo(yolo_path)

    averages = ClassAverages.ClassAverages()
    angle_bins = generate_bins(bins_no)

    image_dir = FLAGS.val_img_path
    cal_dir = FLAGS.calb_path

    img_path = os.path.abspath(os.path.dirname(__file__)) + "/" + image_dir
    # using P_rect from global calibration file instead of per image calibration
    calib_path = os.path.abspath(os.path.dirname(__file__)) + "/" + cal_dir
    calib_file = calib_path + "calib_cam_to_cam.txt"
    # using P from each frame
    # calib_path = os.path.abspath(os.path.dirname(__file__)) + '/Kitti/testing/calib/'

    # KITTI frame ids are the last six digits of the file stem.
    # FIX: glob() does not raise on a missing/empty directory, so the old
    # bare ``except`` never fired -- check for an empty result explicitly.
    image_ids = [x.split('.')[0][-6:] for x in sorted(glob.glob(img_path+'/*.png'))]
    if not image_ids:
        print("\nError: There are no images in %s"%img_path)
        exit()

    for image_id in image_ids:  # renamed from ``id`` (shadowed the builtin)
        start_time = time.time()
        img_file = img_path + image_id + ".png"

        # Read in image and make copies: ``img`` is drawn on, ``truth_img``
        # stays pristine for side-by-side display.
        truth_img = cv2.imread(img_file)
        img = np.copy(truth_img)
        yolo_img = np.copy(truth_img)

        # Run Detection on yolo
        detections = yolo.detect(yolo_img)

        # For each 2D Detection
        for detection in detections:

            if not averages.recognized_class(detection.detected_class):
                continue
            # To catch errors should there be an invalid 2D detection
            try:
                detected_object = DetectedObject(img, detection.detected_class, detection.box_2d, calib_file)
            except Exception:  # narrowed from bare ``except:``; keeps best-effort skip
                continue

            theta_ray = detected_object.theta_ray
            input_img = detected_object.img
            proj_matrix = detected_object.proj_matrix
            box_2d = detection.box_2d
            detected_class = detection.detected_class

            input_tensor = torch.zeros([1,3,224,224]).to(device)
            input_tensor[0,:,:,:] = input_img

            [orient, conf, dim] = model(input_tensor)
            orient = orient.cpu().data.numpy()[0, :, :]
            conf = conf.cpu().data.numpy()[0, :]
            dim = dim.cpu().data.numpy()[0, :]
            # The network regresses residual dimensions; add back the class mean.
            dim += averages.get_item(detected_class)

            # Recover the observation angle from the highest-confidence bin.
            argmax = np.argmax(conf)
            orient = orient[argmax, :]
            cos = orient[0]
            sin = orient[1]
            alpha = np.arctan2(sin, cos)
            alpha += angle_bins[argmax]
            alpha -= np.pi

            if FLAGS.show_2D:
                location = plot_regressed_3d_bbox(img, proj_matrix, box_2d, dim, alpha, theta_ray, truth_img)
            else:
                location = plot_regressed_3d_bbox(img, proj_matrix, box_2d, dim, alpha, theta_ray)

            print('Estimated pose: %s'%location)

        if FLAGS.show_2D:
            numpy_vertical = np.concatenate((truth_img, img), axis=0)
            cv2.imshow('SPACE for next image, any other key to exit', numpy_vertical)
        else:
            cv2.imshow('3D detections', img)

        print("\n")
        print('Got %s poses in %.3f seconds'%(len(detections), time.time() - start_time))
        print('-------------')


        if cv2.waitKey(0) != 32: # space bar
            exit()
Esempio n. 13
0
def _save_loss_plot(values, ylabel, xlabel, save_path):
    """Plot *values*, label the axes, save the figure and clear pyplot state."""
    plt.figure(figsize=(20, 8))
    plt.plot(values)
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.savefig(save_path)
    plt.clf()


def main():
    """Train the 3D bounding-box regressor on KITTI.

    Resumes from the newest checkpoint when one exists, saves weights and
    loss plots after every epoch, and writes a best-epoch/iteration
    summary file at the end.
    """
    print("Initializing....")
    # ======= Hyper Parameters ======== #
    epochs = FLAGS.epochs
    batch_size = 16
    lr = 0.0001
    momentum = 0.9

    alpha = 0.6  # weight of the dimension loss in the total loss
    w = 0.7  # weight of the orientation loss inside loss_theta

    exp_no = FLAGS.exp_no

    params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 6}
    # ================================== #

    print("Starting Experiment No. ", exp_no)
    print("Training for {} epochs ".format(epochs))

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Pytorch is using : ", device)

    print("Loading data...")
    train_path = os.path.abspath(os.path.dirname(__file__)) + '/Kitti/training'
    dataset = Dataset(train_path)

    generator = torch_data.DataLoader(dataset, **params)
    eval_dataset = Dataset(
        os.path.abspath(os.path.dirname(__file__)) + '/eval/train')
    averages = ClassAverages.ClassAverages()

    print("Loading model...")
    my_vgg = models.vgg19_bn(pretrained=True)
    model = Model(features=my_vgg.features).cuda()

    ## SHOW SUMMARY OF MODEL
    # summary(model,(3,244,244))

    opt_SGD = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
    conf_loss_func = nn.CrossEntropyLoss().cuda()
    dim_loss_func = nn.MSELoss().cuda()
    orient_loss_func = OrientationLoss

    # load any previous weights
    weights_path = os.path.abspath(os.path.dirname(__file__)) + '/weights/'
    latest_model = None
    first_epoch = 0

    if not os.path.isdir(weights_path):
        os.mkdir(weights_path)
    else:
        try:
            # BUG FIX: this listed ``model_path`` (undefined) before, which
            # always raised and silently disabled checkpoint resuming.
            latest_model = [
                x for x in sorted(os.listdir(weights_path)) if x.endswith('.pkl')
            ][-1]
        except IndexError:
            # no checkpoint saved yet -- start training from scratch
            pass

    # Create Folders for this experiments
    for x in range(epochs):
        check_and_make_dir('Kitti/results/training/plots/exp_' + str(exp_no) +
                           "/epoch_%s/" % str(x + 1))
    check_and_make_dir(weights_path + "exp_" + str(exp_no) + '/')

    if latest_model is not None:
        # BUG FIX: this loaded ``model_lst[-1]`` (undefined) before, which
        # raised NameError whenever a checkpoint was actually found.
        checkpoint = torch.load(weights_path + '/%s' % latest_model)
        # else:
        #     checkpoint = torch.load(weights_path + '/%s' % latest_model, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])
        opt_SGD.load_state_dict(checkpoint['optimizer_state_dict'])
        first_epoch = checkpoint['epoch']
        loss = checkpoint['loss']

        print('Found previous checkpoint: %s at epoch %s' %
              (latest_model, first_epoch))
        print('Resuming training....')

    total_num_batches = int(len(dataset) / batch_size)
    losses = []
    epoch_losses = []
    dim_lossess = []
    theta_lossess = []
    orient_lossess = []

    print('Training is commencing....')
    for epoch in range(first_epoch + 1, epochs + 1):
        # model.train(True)
        curr_batch = 0
        passes = 0
        # Training Loop
        for local_batch, local_labels in generator:

            truth_orient = local_labels['Orientation'].float().cuda()
            truth_conf = local_labels['Confidence'].long().cuda()
            truth_dim = local_labels['Dimensions'].float().cuda()

            local_batch = local_batch.float().cuda()
            [orient, conf, dim] = model(local_batch)

            orient_loss = orient_loss_func(orient, truth_orient, truth_conf)
            dim_loss = dim_loss_func(dim, truth_dim)

            # CrossEntropyLoss expects class indices, not one-hot targets.
            truth_conf = torch.max(truth_conf, dim=1)[1]
            conf_loss = conf_loss_func(conf, truth_conf)

            loss_theta = conf_loss + w * orient_loss
            loss = alpha * dim_loss + loss_theta

            opt_SGD.zero_grad()
            loss.backward()
            opt_SGD.step()

            # log every 50 batches
            if passes % 50 == 0:
                print("--- epoch %s | batch %s/%s --- [loss: %s]" %
                      (epoch, curr_batch, total_num_batches, loss.item()))
                passes = 0

            orient_lossess.append(orient_loss.item())
            dim_lossess.append(dim_loss.item())
            theta_lossess.append(loss_theta.item())
            losses.append(loss.item())
            passes += 1
            curr_batch += 1

        # last batch's loss stands in for the epoch loss
        epoch_losses.append(loss.item())
        ### ++++++++++++++++++++++++++++++++++++++++++++
        # save a checkpoint after every epoch
        if epoch % 1 == 0:
            name = weights_path + "exp_" + str(exp_no) + "/exp_" + str(
                exp_no) + '_epoch_%s.pkl' % epoch
            print("====================")
            print("Done with epoch %s!" % epoch)
            print("Saving weights as %s ..." % name)
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': opt_SGD.state_dict(),
                    'loss': loss
                }, name)
            print("====================")

        path = 'Kitti/results/training/plots/exp_' + str(exp_no) + '/'
        print("Saving Metric Graphs")

        # Common filename prefix for this epoch's plots.
        prefix = path + "epoch_%s/" % epoch + "exp_" + str(
            exp_no) + "_epoch_%s" % epoch
        _save_loss_plot(orient_lossess, 'Overall Loss', 'Iterations',
                        prefix + "_Orientation.png")
        _save_loss_plot(dim_lossess, 'Dimension Loss', 'Iterations',
                        prefix + '_Dimension.png')
        _save_loss_plot(theta_lossess, 'Theta Loss', 'Iterations',
                        prefix + '_Theta.png')
        _save_loss_plot(losses, 'Overall Loss', 'Iterations',
                        prefix + '_Overall-Loss.png')
        _save_loss_plot(epoch_losses, 'Overall Loss', 'Epoch',
                        prefix + '_Overall-Loss-per-Epoch.png')

    result_name = 'Kitti/results/training/results_exp_' + str(exp_no) + ".txt"

    check_and_make_dir(result_name)

    # Summarize the best epoch/iterations; ``with`` guarantees the file closes.
    with open(result_name, "w") as results_file:
        summaries = (
            ("Epoch with the lowest loss : Epoch ", epoch_losses),
            ("Iteration with the lowest orientation loss : Iteration ",
             orient_lossess),
            ("Iteration with the lowest dimension loss : Iteration ",
             dim_lossess),
            ("Iteration with the lowest theta loss : Iteration ",
             theta_lossess),
        )
        for label, series in summaries:
            best_index = np.argmin(series)
            results_file.write(label + str(best_index) + "   Loss: " +
                               str(series[best_index]) + "\n")
Esempio n. 14
0
import torchvision.transforms as transforms

# sets seeds for numpy, torch, etc...
# must do for DDP to work well
# seed_everything(123)

if __name__ == '__main__':
    # add args from trainer
    parser = ArgumentParser(add_help=False)
    parser = Trainer.add_argparse_args(parser)
    # give the module a chance to add own params
    parser = add_model_specific_args(parser)

    # parse params
    args = parser.parse_args()
    model = Model(hparams=args)

    # most basic trainer, uses good defaults
    trainer = Trainer.from_argparse_args(args)
    trainer.fit(model)

    # test
    PATH = "/work4/zhangyang/pl-test/01_pl_mnist/lightning_logs/version_0/checkpoints/epoch=51.ckpt"
    pretrained_model = model.load_from_checkpoint(PATH, hparams=args)
    pretrained_model.eval()

    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    dataset = MNIST(args.data_dir,
Esempio n. 15
0
    def post(self, slug=None):
        """Populate the datastore with random demo content.

        Creates random users, categories, tags, posts (with tag links),
        comments and attachments, then writes the elapsed time to the
        response body.

        :param slug: unused; accepted for URL-routing compatibility.
        """
        # NOTE(review): time.clock() was removed in Python 3.8 -- this
        # handler presumably targets a Python 2 runtime; verify before porting.
        start_time = time.clock()
        # Init User
        for i in range(random.randint(5, 40)):
            a = Model.User()
            a.username = self.getRstr(2, 7)
            a.password = self.getRstr(3, 8)
            a.lastip = self.getRip()
            a.created = self.getRtime()
            a.lastlogin = self.getRtime()
            a.setEmail(self.getRemail())
            a.put()
        users = Model.User.all().fetch(1000)
        ulen = len(users)

        # Init Category
        for i in range(random.randrange(30)):
            c = Model.Category()
            c.title = self.getRstr(2, 5)
            c.url = self.getRstr(2, 8)
            c.description = self.getRstr(10, 20)
            c.order = random.randrange(5)
            c.put()
        categorys = Model.Category.all().fetch(1000)
        clen = len(categorys)

        # Init Tag
        for i in range(random.randrange(40)):
            t = Model.Tag()
            t.title = self.getRstr(1, 5).upper()
            t.description = self.getRstr(10, 20)
            t.put()
        tags = Model.Tag.all().fetch(1000)
        tlen = len(tags)

        # Init Post -- each post gets a random category, author and view count.
        for i in range(random.randrange(100)):
            a = Model.Post()
            a.category = categorys[random.randrange(clen)]
            a.author = users[random.randrange(ulen)]
            a.created = datetime.datetime.now()
            a.title = self.getRstr(3, 15)
            a.content = self.getRstr(50, 200)
            a.precontent = self.getRstr(20, 70)
            a.url = self.getRstr(3, 10)
            Base.processurl(a)
            a.views = random.randrange(110)
            a.put()

            # Init tags_post -- link up to 3 distinct random tags to this post.
            hastag = []
            for j in range(random.randrange(4)):
                t = tags[random.randrange(tlen)]
                if t in hastag:
                    continue
                else:
                    hastag.append(t)
                tp = Model.tags_posts()
                tp.tag = t
                tp.post = a
                tp.put()
        posts = Model.Post.all().fetch(1000)
        plen = len(posts)

        # Init Comment
        for i in range(random.randrange(20, 200)):
            cment = Model.Comment()
            # randrange(10) > 3 is true 60% of the time => ~60% anonymous.
            cment.author = None if random.randrange(10) > 3 else users[random.randrange(ulen)]
            cment.belong = posts[random.randrange(plen)]
            cment.content = self.getRstr(10, 30)
            cment.created = self.getRtime()
            cment.nickname = self.getRstr(2, 10)
            cment.ip = self.getRip()
            cment.setWebsite(self.getRurl())
            cment.setEmail(self.getRemail())
            cment.put()

        # Init Attachment -- static files shipped with the app, reused as
        # fake uploads.
        files = (
            os.path.join("static", "images", "foot-wp.gif"),
            os.path.join("static", "images", "layout2.png"),
            os.path.join("static", "css", "admin.css"),
            os.path.join("static", "js", "jquery-1.4.4.js"),
        )
        for i in range(random.randrange(5, 25)):
            attach = Model.Attachment()
            attach.belong = users[random.randrange(ulen)]
            attach.beuse = posts[random.randrange(plen)]
            attach.created = self.getRtime()
            afile = files[random.randrange(4)]
            attach.filename = os.path.split(afile)[1]
            attach.filetype = os.path.splitext(attach.filename)[1][1:]
            # NOTE(review): the file handle is never closed -- acceptable in a
            # short-lived request handler, but worth fixing if ported.
            attach.content = Model.toBlob(open(afile).read())
            attach.filesize = len(attach.content)
            attach.put()

        end_time = time.clock()
        self.write(end_time - start_time)
Esempio n. 16
0
    test_sentence_inputs, test_aspect_text_inputs, test_positions, _ = example_reader.get_position_input(test_sentence_inputs,
                                                                                                         test_aspect_text_inputs)

    embedding_matrix = example_reader.get_embedding_matrix()
    position_ids = example_reader.get_position_ids(max_len=82)
    example_reader.convert_position(position_inputs=train_positions, position_ids=position_ids)
    example_reader.convert_position(position_inputs=test_positions, position_ids=position_ids)

    train_aspects = example_reader.pad_aspect_index(train_aspect_text_inputs.tolist(), max_length=9)
    test_aspects = example_reader.pad_aspect_index(test_aspect_text_inputs.tolist(), max_length=9)
    # ---------------------------------------------------------------------

    for i in range(5):
        model = m.build_model(max_len=82,
                              aspect_max_len=9,
                              embedding_matrix=embedding_matrix,
                              position_embedding_matrix=position_matrix,
                              class_num=3,
                              num_words=4582)  # 5144 4582 //   # 1523 // 1172
        evaluator = Evaluator(true_labels=test_true_labels, sentences=test_sentence_inputs, aspects=test_aspect_text_inputs)
        epoch = 1
        while epoch <= 80:
            model = m.train_model(sentence_inputs=train_sentence_inputs,
                                  position_inputs=train_positions,
                                  aspect_input=train_aspects,
                                  labels=train_aspect_labels,
                                  model=model)
            results = m.get_predict(sentence_inputs=test_sentence_inputs,
                                    position_inputs=test_positions,
                                    aspect_input=test_aspects,
                                    model=model)
            print("\n--------------epoch " + str(epoch) + " ---------------------")
Esempio n. 17
0
# Make the model directory importable before pulling in its libraries.
model_dir = os.path.dirname(model_path)
sys.path.insert(0, model_dir)

from lib import SegDataset, Model, AlignCollate
from settings import ModelSettings

ms = ModelSettings()

if torch.cuda.is_available() and not opt.usegpu:
    print('WARNING: You have a CUDA device, so you should probably run with --cuda')

# Define Data Loaders
# Pinned host memory speeds up host->GPU transfers when the GPU is used.
pin_memory = False
if opt.usegpu:
    pin_memory = True

test_dataset = SegDataset(opt.lmdb)
# 'test' mode collation; sizing/augmentation settings come from ModelSettings.
test_align_collate = AlignCollate('test', ms.LABELS, ms.MEAN, ms.STD, ms.IMAGE_SIZE_HEIGHT, ms.IMAGE_SIZE_WIDTH,
                                  ms.ANNOTATION_SIZE_HEIGHT, ms.ANNOTATION_SIZE_WIDTH, ms.CROP_SCALE, ms.CROP_AR,
                                  random_cropping=ms.RANDOM_CROPPING, horizontal_flipping=ms.HORIZONTAL_FLIPPING,random_jitter=ms.RANDOM_JITTER)
assert test_dataset
# Shuffling is disabled so evaluation results are reproducible.
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchsize, shuffle=False,
                                          num_workers=opt.nworkers, pin_memory=pin_memory, collate_fn=test_align_collate)

# Define Model
model = Model(ms.LABELS, load_model_path=model_path, usegpu=opt.usegpu)

# Test Model
test_accuracy, test_dice_coeff = model.test(ms.CLASS_WEIGHTS, test_loader)
Esempio n. 18
0
def main():
    """Run the trained 3D-bbox model over the KITTI validation split and show results.

    Loads the newest ``.pkl`` weight file from ``./weights``, regresses
    orientation / confidence / dimensions for every labelled object crop and
    draws the predicted and ground-truth 3D boxes side by side.
    """
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    weights_path = os.path.abspath(os.path.dirname(__file__)) + '/weights'
    weight_list = [x for x in sorted(os.listdir(weights_path)) if x.endswith('.pkl')]

    if len(weight_list) == 0:
        print('We could not find any model weight to load, please train the model first!')
        exit()
    else:
        print ('Using previous model weights %s'%weight_list[-1])
        my_vgg = models.vgg19_bn(pretrained=True)
        model = Model.Model(features=my_vgg.features, bins=2)
        # map_location keeps GPU-trained checkpoints loadable on CPU-only
        # machines and places tensors directly on the active device.
        checkpoint = torch.load(weights_path + '/%s' % weight_list[-1],
                                map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        # BUG FIX: the model was never moved to the selected device before.
        model.to(device)
        model.eval()

    # Load Test Images from validation folder
    dataset = Dataset(os.path.abspath(os.path.dirname(__file__)) + '/Kitti/validation/')
    all_images = dataset.all_objects()
    print ("Length of validation data",len(all_images))
    averages = ClassAverages.ClassAverages()

    all_images = dataset.all_objects()
    print ("Model is commencing predictions.....")
    for key in sorted(all_images.keys()):

        data = all_images[key]
        truth_img = data['Image']
        img = np.copy(truth_img)
        imgGT = np.copy(truth_img)
        objects = data['Objects']
        cam_to_img = data['Calib']

        # Renamed from `object`, which shadowed the builtin.
        for obj in objects:

            label = obj.label
            theta_ray = obj.theta_ray
            input_img = obj.img

            input_tensor = torch.zeros([1,3,224,224])
            input_tensor[0,:,:,:] = input_img
            # BUG FIX: `.cuda()` is not in-place -- the returned tensor was
            # discarded, and the call crashed on CPU-only machines. Move the
            # tensor explicitly to the device chosen above.
            input_tensor = input_tensor.to(device)

            # Inference only -- no autograd graph needed.
            with torch.no_grad():
                [orient, conf, dim] = model(input_tensor)
            orient = orient.cpu().data.numpy()[0, :, :]
            conf = conf.cpu().data.numpy()[0, :]
            dim = dim.cpu().data.numpy()[0, :]

            # Dimensions are regressed as residuals over the per-class mean.
            dim += averages.get_item(label['Class'])

            # Pick the most confident orientation bin and recover alpha.
            argmax = np.argmax(conf)
            orient = orient[argmax, :]
            cos = orient[0]
            sin = orient[1]
            alpha = np.arctan2(sin, cos)
            alpha += dataset.angle_bins[argmax]
            alpha -= np.pi

            # Both calls draw the 3D box onto img / imgGT as a side effect.
            location = plot_regressed_3d_bbox_2(img, truth_img, cam_to_img, label['Box_2D'], dim, alpha, theta_ray)
            locationGT = plot_regressed_3d_bbox_2(imgGT, truth_img, cam_to_img, label['Box_2D'], label['Dimensions'], label['Alpha'], theta_ray)

            # print('Estimated pose: %s'%location)
            # print('Truth pose: %s'%label['Location'])
            # print('-------------')

        if not FLAGS.hide_imgs:
            # Stack truth / ground-truth-box / predicted-box images vertically.
            numpy_vertical = np.concatenate((truth_img,imgGT, img), axis=0)
            cv2.imshow('2D detection on top, 3D Ground Truth on middle , 3D prediction on bottom', numpy_vertical)
            cv2.waitKey(0)

    print ("Finished.")
Esempio n. 19
0
    pass

# Make the trained model's bundled `lib` and `settings` modules importable.
model_dir = os.path.dirname(model_path)
sys.path.insert(0, model_dir)

from lib import Model, Prediction

# Pick the per-dataset settings object. Fail fast on an unknown dataset name
# instead of hitting a confusing NameError on `ms` further down.
if opt.dataset == 'CVPPP':
    from settings import CVPPPModelSettings
    ms = CVPPPModelSettings()
elif opt.dataset == 'microfibers':
    from settings import MicrofibersModelSettings
    ms = MicrofibersModelSettings()
else:
    raise ValueError('Unknown dataset: %s' % opt.dataset)

# Instance-segmentation model, optionally resuming saved weights.
model = Model(opt.dataset, ms.MODEL_NAME, ms.N_CLASSES, ms.MAX_N_OBJECTS,
              use_instance_segmentation=ms.USE_INSTANCE_SEGMENTATION,
              use_coords=ms.USE_COORDINATES, load_model_path=opt.model,
              usegpu=opt.usegpu)

prediction = Prediction(ms.IMAGE_HEIGHT, ms.IMAGE_WIDTH,
                        ms.MEAN, ms.STD, False, model,
                        1)

for image_name, image_path in zip(image_names, images_list):
    image, fg_seg_pred, ins_seg_pred, n_objects_pred = \
        prediction.predict(image_path)

    _output_path = os.path.join(output_path, image_name)

    try:
        os.makedirs(_output_path)
    except BaseException:
def main():
    """Evaluate every saved epoch checkpoint of one experiment on KITTI validation data.

    For each ``.pkl`` weight file, the model's per-object predictions are
    written out in KITTI label format (one ``.txt`` per image) and a
    truth / ground-truth-box / predicted-box comparison image is saved.
    """
    # Experiment to evaluate; kept hard-coded as in the original workflow.
    exp_no = 34

    print("Generating evaluation results for experiment No. ", exp_no)

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    weights_path = os.path.abspath(
        os.path.dirname(__file__)) + '/weights/exp_' + str(exp_no) + '/'
    weight_list = [
        x for x in sorted(os.listdir(weights_path)) if x.endswith('.pkl')
    ]

    # Create out folder for pred-labels and pred-imgs
    for x in range(len(weight_list)):
        check_and_make_dir('Kitti/results/validation/labels/exp_' +
                           str(exp_no) + "/epoch_%s/" % str(x + 1))
    check_and_make_dir('Kitti/results/validation/pred_imgs/exp_' + str(exp_no))

    if len(weight_list) == 0:
        print(
            'We could not find any model weights to load, please train the model first!'
        )
        exit()

    for model_weight in weight_list:
        epoch_no = model_weight.split(".")[0].split('_')[-1]
        print("Evaluating for Epoch: ", epoch_no)

        print('Loading model with %s' % model_weight)
        my_vgg = models.vgg19_bn(pretrained=True)
        model = Model.Model(features=my_vgg.features, bins=2)
        # BUG FIX: both branches loaded the checkpoint identically, so a
        # GPU-trained checkpoint crashed on CPU-only machines. map_location
        # handles both cases in one call.
        checkpoint = torch.load(weights_path + model_weight,
                                map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        # BUG FIX: the model was never moved to the selected device before.
        model.to(device)
        model.eval()

        # Load Test Images from eval folder.
        # BUG FIX: a '/' was missing before 'Kitti/validation', which produced
        # a path like '.../srcKitti/validation'.
        dataset = Dataset(
            os.path.abspath(os.path.dirname(__file__)) + '/Kitti/validation')
        all_images = dataset.all_objects()
        print("Length of eval data", len(all_images))
        averages = ClassAverages.ClassAverages()

        all_images = dataset.all_objects()
        print("Model is commencing predictions.....")
        for key in tqdm(sorted(all_images.keys())):

            data = all_images[key]
            truth_img = data['Image']
            img = np.copy(truth_img)
            imgGT = np.copy(truth_img)
            objects = data['Objects']
            cam_to_img = data['Calib']

            filename = "Kitti/results/validation/labels/exp_" + str(
                exp_no) + '/epoch_' + str(epoch_no) + "/" + str(key) + ".txt"
            check_and_make_dir(filename)

            # `with` guarantees the label file is closed even if a prediction
            # raises part-way through (the original used open/close by hand).
            with open(filename, "w") as label_file:
                # Renamed from `object`, which shadowed the builtin.
                for obj in objects:

                    label = obj.label
                    theta_ray = obj.theta_ray
                    input_img = obj.img

                    input_tensor = torch.zeros([1, 3, 224, 224])
                    input_tensor[0, :, :, :] = input_img
                    # BUG FIX: `.cuda()` is not in-place -- the returned
                    # tensor was discarded, and the call crashed without CUDA.
                    input_tensor = input_tensor.to(device)

                    # Inference only -- no autograd graph needed.
                    with torch.no_grad():
                        [orient, conf, dim] = model(input_tensor)
                    orient = orient.cpu().data.numpy()[0, :, :]
                    conf = conf.cpu().data.numpy()[0, :]
                    dim = dim.cpu().data.numpy()[0, :]

                    # Dimensions are regressed as residuals over the class mean.
                    dim += averages.get_item(label['Class'])

                    # Most confident orientation bin -> recover alpha.
                    argmax = np.argmax(conf)
                    orient = orient[argmax, :]
                    cos = orient[0]
                    sin = orient[1]
                    alpha = np.arctan2(sin, cos)
                    alpha += dataset.angle_bins[argmax]
                    alpha -= np.pi

                    # Both calls draw the 3D box onto img / imgGT as a side effect.
                    location = plot_regressed_3d_bbox_2(img, truth_img, cam_to_img,
                                                        label['Box_2D'], dim,
                                                        alpha, theta_ray)
                    locationGT = plot_regressed_3d_bbox_2(
                        imgGT, truth_img, cam_to_img, label['Box_2D'],
                        label['Dimensions'], label['Alpha'], theta_ray)

                    # KITTI label line: class, truncation, occlusion, alpha,
                    # 2D box, 3D dimensions, 3D location, rotation_y, score.
                    box = label['Box_2D']
                    fields = [
                        str(label['Class']), '-1', '-1',
                        str(round(alpha, 2)),
                        str(box[0][0]), str(box[0][1]),
                        str(box[1][0]), str(box[1][1]),
                        ' '.join(str(round(e, 2)) for e in dim),
                        ' '.join(str(round(e, 2)) for e in location),
                        '0.0',
                        str(round(theta_ray + alpha, 2)),
                        str(round(max(softmax(conf)), 2)),
                    ]
                    label_file.write(' '.join(fields) + "\n")

                    # print('Estimated pose: %s'%location)
                    # print('Truth pose: %s'%label['Location'])
                    # print('-------------')

            numpy_vertical = np.concatenate((truth_img, imgGT, img), axis=0)
            image_name = 'Kitti/results/validation/pred_imgs/exp_' + str(
                exp_no) + '/' + str(key) + "/epoch_" + epoch_no + '_' + str(
                    key) + '.jpg'
            check_and_make_dir(image_name)
            cv2.imwrite(image_name, numpy_vertical)

        print("Finished.")
Esempio n. 21
0
    random_hor_flipping=ts.HORIZONTAL_FLIPPING,
    random_ver_flipping=ts.VERTICAL_FLIPPING,
    random_transposing=ts.TRANSPOSING,
    random_90x_rotation=ts.ROTATION_90X,
    random_rotation=ts.ROTATION,
    random_color_jittering=ts.COLOR_JITTERING,
    random_grayscaling=ts.GRAYSCALING,
    random_channel_swapping=ts.CHANNEL_SWAPPING,
    random_gamma=ts.GAMMA_ADJUSTMENT,
    random_resolution=ts.RESOLUTION_DEGRADING)

# Validation batches keep a fixed order so metrics are comparable run-to-run.
loader_kwargs = dict(batch_size=opt.batchsize,
                     shuffle=False,
                     num_workers=opt.nworkers,
                     pin_memory=pin_memory,
                     collate_fn=test_align_collate)
test_loader = torch.utils.data.DataLoader(test_dataset, **loader_kwargs)

# Instance-segmentation model, optionally resuming saved weights.
model = Model(opt.dataset, ts.MODEL_NAME, ts.N_CLASSES, ts.MAX_N_OBJECTS,
              use_instance_segmentation=ts.USE_INSTANCE_SEGMENTATION,
              use_coords=ts.USE_COORDINATES, load_model_path=opt.model,
              usegpu=opt.usegpu)

# Run the full training loop with the configured hyper-parameters.
model.fit(ts.CRITERION, ts.DELTA_VAR, ts.DELTA_DIST, ts.NORM, ts.LEARNING_RATE,
          ts.WEIGHT_DECAY, ts.CLIP_GRAD_NORM, ts.LR_DROP_FACTOR,
          ts.LR_DROP_PATIENCE, ts.OPTIMIZE_BG, ts.OPTIMIZER, ts.TRAIN_CNN,
          opt.nepochs, ts.CLASS_WEIGHTS, train_loader, test_loader,
          model_save_path, opt.debug)
Esempio n. 22
0
                                  ts.LABELS,
                                  ts.MEAN,
                                  ts.STD,
                                  ts.IMAGE_SIZE_HEIGHT,
                                  ts.IMAGE_SIZE_WIDTH,
                                  ts.ANNOTATION_SIZE_HEIGHT,
                                  ts.ANNOTATION_SIZE_WIDTH,
                                  ts.CROP_SCALE,
                                  ts.CROP_AR,
                                  random_cropping=ts.RANDOM_CROPPING,
                                  horizontal_flipping=ts.HORIZONTAL_FLIPPING)
assert test_dataset

# Evaluation batches keep a fixed order so metrics are comparable run-to-run.
eval_loader_kwargs = dict(batch_size=opt.batchsize,
                          shuffle=False,
                          num_workers=opt.nworkers,
                          pin_memory=pin_memory,
                          collate_fn=test_align_collate)
test_loader = torch.utils.data.DataLoader(test_dataset, **eval_loader_kwargs)

# Define Model -- the expected input shape is passed so the net can be sized.
input_shape = [opt.batchsize, 3, ts.IMAGE_SIZE_HEIGHT, ts.IMAGE_SIZE_WIDTH]
model = Model(ts.LABELS,
              input_shape,
              load_model_path=opt.model,
              usegpu=opt.usegpu)

# Train Model with the configured hyper-parameters.
model.fit(ts.CRITERION, ts.LEARNING_RATE, ts.WEIGHT_DECAY, ts.CLIP_GRAD_NORM,
          ts.LR_DROP_FACTOR, ts.LR_DROP_PATIENCE, ts.OPTIMIZE_BG, ts.OPTIMIZER,
          ts.TRAIN_CNN, opt.nepochs, ts.CLASS_WEIGHTS, train_loader,
          test_loader, model_save_path)
Esempio n. 23
0
parser.add_argument('--output',
                    required=True,
                    help='path of the output directory')
opt = parser.parse_args()

image_path = opt.image
model_path = opt.model
output_path = opt.output

# BUG FIX: the original `try: os.makedirs(...) except: pass` swallowed every
# exception (including KeyboardInterrupt and permission errors). exist_ok
# covers the only expected failure -- the directory already existing.
os.makedirs(output_path, exist_ok=True)

# Make the trained model's bundled `lib` and `settings` modules importable.
model_dir = os.path.dirname(model_path)
sys.path.insert(0, model_dir)

from lib import Model, Prediction
from settings import ModelSettings

ms = ModelSettings()

# Run a single-image segmentation prediction and save the input image and
# its predicted mask as PNGs next to each other in the output directory.
model = Model(ms.LABELS, load_model_path=model_path, usegpu=opt.usegpu)
prediction = Prediction(ms.IMAGE_SIZE_HEIGHT, ms.IMAGE_SIZE_WIDTH, ms.MEAN,
                        ms.STD, model)
image, pred = prediction.predict(image_path)

image_name = os.path.splitext(os.path.basename(image_path))[0]
image.save(os.path.join(output_path, image_name + '.png'))
pred.save(os.path.join(output_path, image_name + '-mask.png'))
Esempio n. 24
0
# NOTE(review): this fragment uses a Python 2 print statement below -- the
# whole example appears to be Python 2 code; confirm the target interpreter.

# Pin host memory only when feeding a GPU (speeds up CPU->GPU transfers).
pin_memory = False
if args.usegpu:
    pin_memory = True

# Load Seeds -- fix all RNG sources for reproducible splits and training.
random.seed(s.SEED)
np.random.seed(s.SEED)
torch.manual_seed(s.SEED)

# Load Data -- splits into train/val/test by the configured ratios.
data = Data(data_file=args.data, input_horizon=s.INPUT_HORIZON,
            n_stations=args.n_stations, train_ratio=s.TRAIN_RATIO,
            val_ratio=s.VAL_RATIO, debug=args.debug)

# Load Model
model = Model(args.n_stations, s.MOVING_HORIZON, s.ACTIVATION, s.CRITERION, usegpu=args.usegpu)

# Train First RNN
[X_train, y_train], [X_val, y_val], [X_test, y_test] = data.load_data_lstm_1()

rnn_model_num = 1
print '#' * 10 + ' RNN 1 ' + '#' * 10

# Shuffle training batches; keep validation order fixed for stable metrics.
train_loader = torch.utils.data.DataLoader(Loader((X_train, y_train)), batch_size=args.batch_size, shuffle=True,
                                           num_workers=args.n_workers, pin_memory=pin_memory)

val_loader = torch.utils.data.DataLoader(Loader((X_val, y_val)), batch_size=args.batch_size, shuffle=False,
                                         num_workers=args.n_workers, pin_memory=pin_memory)

model.fit(rnn_model_num, s.LEARNING_RATE, s.WEIGHT_DECAY, s.CLIP_GRAD_NORM, s.LR_DROP_FACTOR, s.LR_DROP_PATIENCE, s.PATIENCE, 
          s.OPTIMIZER, s.N_EPOCHS[rnn_model_num - 1],