Example No. 1
import tensorflow as tf


def build_model(num_classes):
    # my_model is assumed to return an uncompiled tf.keras.Model defined elsewhere
    model = my_model(num_classes)
    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
        metrics=['accuracy'])
    return model
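A hedged usage sketch of the function above (the dummy data shapes and the class count of 10 are illustrative assumptions, not part of the original example; `my_model` must accept an input of matching shape):

import numpy as np

# Hypothetical toy data: 100 samples, 20 features, 10 classes
x_train = np.random.rand(100, 20).astype("float32")
y_train = np.random.randint(0, 10, size=(100,))

model = build_model(num_classes=10)
# Labels are integer class indices, matching SparseCategoricalCrossentropy
model.fit(x_train, y_train, epochs=2, batch_size=32)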
Example No. 2
# NumPy and Salvus imports assumed by this snippet; nx, ny (grid shape) and rho
# (constant density) are defined elsewhere in the original script.
import numpy as np
import salvus.namespace as sn

dt, dx = 0.02, 1  # Time step, space step
f_max = 20  # Maximum frequency
max_x, max_y = 4000, 4000  # Model extent in x and y

# Load model
data = np.fromfile(file="../../vel1_copy.bin",
                   dtype=np.float32,
                   count=-1,
                   sep='',
                   offset=0)

vp_asteroid = data.reshape(nx, ny)  # Velocity model
rho_asteroid = np.full((nx, ny), rho, dtype=int)  # Density model

true_model = my_model(vp=vp_asteroid,
                      rho=rho_asteroid,
                      max_x=max_x,
                      max_y=max_y)
true_model.vp.T.plot()

# %%
# ------------------------------------------------------------------------------
# CREATE NEW SALVUS PROJECT
# ------------------------------------------------------------------------------
get_ipython().system('rm -rf project_salvus')
vm = sn.model.volume.cartesian.GenericModel(name="true_model", data=true_model)
p = sn.Project.from_volume_model(path="project_salvus", volume_model=vm)

# Source time function (stf)
wavelet = sn.simple_config.stf.Ricker(center_frequency=0.5 * f_max)

# Sources
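For reference, the Ricker wavelet requested by the stf line above can be sketched directly in NumPy, independent of the Salvus API (the time axis below is an illustrative choice, not from the original script):

import numpy as np

fc = 0.5 * f_max                    # Center frequency used above
t = np.arange(-1.0, 1.0, dt)        # Illustrative time axis
arg = (np.pi * fc * t) ** 2
ricker = (1.0 - 2.0 * arg) * np.exp(-arg)  # Ricker (Mexican hat) wavelet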
Example No. 3
# Load model (assumes the same imports and the nx, ny, rho, c, max_x, max_y,
# f_max definitions used in the previous example)
data = np.fromfile(file="../../vel1_copy.bin",
                   dtype=np.float32,
                   count=-1,
                   sep='',
                   offset=0)

eps_asteroid = data.reshape(nx, ny)  # Relative permittivity model
rho_asteroid = np.full((nx, ny), rho, dtype=int)  # Density model
mu_asteroid = np.full((nx, ny), 1, dtype=int)  # Relative magnetic permeability
v_radar = c * eps_asteroid / eps_asteroid.max()  # Radar velocity model (scaled by the normalized permittivity)

true_model = my_model(vp=v_radar,
                      rho=rho_asteroid,
                      max_x=max_x,
                      max_y=max_y,
                      acoustic=False)
# true_model.vp.T.plot()

# %%
# ------------------------------------------------------------------------------
# CREATE NEW SALVUS PROJECT
# ------------------------------------------------------------------------------
get_ipython().system('rm -rf project_salvus')
vm = sn.model.volume.cartesian.GenericModel(name="true_model", data=true_model)
p = sn.Project.from_volume_model(path="project_salvus", volume_model=vm)

# Source time function (stf)
wavelet = sn.simple_config.stf.Ricker(center_frequency=0.5 * f_max)
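The reshape calls in these two snippets rely on nx and ny, which the original script defines earlier. A minimal sketch of how they could be derived from the model extent and grid spacing (the inclusive-endpoint convention and the density value are assumptions, not taken from the original):

nx = int(max_x / dx) + 1  # Grid points in x (assumed convention)
ny = int(max_y / dx) + 1  # Grid points in y
rho = 2000                # Hypothetical constant density, kg/m^3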
Example No. 4
import torch
from torchvision.models import AlexNet
from models.my_model import *
from torchviz import make_dot
 
x = torch.rand(8, 3, 256, 512)  # Dummy input batch: 8 images, 3 channels, 256x512
model = my_model()
y = model(x)

g = make_dot(y)  # Build a Graphviz graph of the autograd trace

g.render('espnet_model', view=False)  # Writes 'espnet_model' (DOT source) plus a rendered PDF
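A hedged variant that also labels the model's parameters in the graph and renders to PNG (`params` is an optional argument of torchviz's make_dot; the output format choice here is illustrative):

g = make_dot(y, params=dict(model.named_parameters()))
g.render('espnet_model', format='png', view=False)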
Example No. 5
# Imports assumed by this snippet; project-specific helpers (models.my_model,
# load_datasets, choose_device, bbox_loss, and the global `margin`) come from
# elsewhere in the repository.
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader


def start(batch_size, n_epochs, learning_rate):
    plot_path = "./plot"
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'

    device = choose_device()
    device = "cuda"  # Overrides choose_device(); the rest of the function assumes a GPU
    my_model = models.my_model()
    my_model = my_model.to(device=device)
    # extract_object = PuppyDetection.extract_object()
    # extract_object = extract_object.to(device=device)

    # Load dataset
    train_data, test_data, classes = load_datasets()
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(device=device)
    optimizer = optim.SGD(my_model.parameters(), lr=learning_rate)

    # start training
    def train(n_epochs):
        least_loss = 999
        loss_over_time = []

        my_model.train()

        for epoch in range(n_epochs):
            running_loss = 0.0

            for batch_i, data in enumerate(train_loader):
                # get the input images and their corresponding labels

                #################################  read boxes list from file  #################################
                dog_boxes = []
                dog_head_boxes = []

                original_images, labels, fileNames = data
                original_images = original_images.cuda()

                save_box_path = os.getcwd() + "/boxes/"
                for i in range(len(fileNames)):
                    save_fileName = fileNames[i].split('.')[0]
                    save_file_path = os.path.join(save_box_path,
                                                  save_fileName + '.txt')
                    with open(save_file_path) as f:
                        line = f.readlines()
                    dog = list(map(int, line[0].split(' ')[:-1]))
                    dog_head = list(map(int, line[1].split(' ')[:-1]))
                    dog_boxes.append(dog)
                    dog_head_boxes.append(dog_head)
                dog_head_boxes = torch.tensor(dog_head_boxes)

                #print("read test end")
                #################################  read boxes list from file  #################################

                inputs = []
                for i in range(len(dog_boxes)):
                    # Crop the dog region given by dog_boxes[i] from the original image
                    crop = original_images[i][
                        :,
                        int(dog_boxes[i][0]):int(dog_boxes[i][0] + dog_boxes[i][2]),
                        int(dog_boxes[i][1]):int(dog_boxes[i][1] + dog_boxes[i][3])].clone()
                    crop = torch.unsqueeze(crop, dim=0)
                    # F.interpolate replaces the deprecated F.upsample
                    crop = F.interpolate(crop, (224, 224),
                                         mode='bilinear',
                                         align_corners=False)
                    crop = torch.squeeze(crop, dim=0)
                    inputs.append(crop)
                inputs = torch.stack(inputs, dim=0)
                '''inputs = torch.squeeze(inputs, 1)
                print("size: ", inputs.size())'''
                if (device == "cuda"):
                    inputs, labels = inputs.cuda(), labels.cuda()

                # zero the parameter (weight) gradients
                optimizer.zero_grad()
                # forward pass to get outputs
                # pdb.set_trace()

                bbox, pred1, pred2 = my_model(inputs)

                # calculate the loss
                loss1 = criterion(pred1, labels)

                loss2 = criterion(pred2, labels)

                # Bounding-box loss on the predicted dog-head box
                loss3, _ = bbox_loss(bbox, dog_head_boxes)
                loss3 = torch.tensor(loss3)

                # temp1[i][i] == pred1[i, labels[i]]: the score of the true class of
                # sample i (likewise temp2); `margin` is assumed to be a global constant.
                temp1 = pred1[:, labels]
                temp1 = temp1.cpu().detach().numpy()
                temp2 = pred2[:, labels]
                temp2 = temp2.cpu().detach().numpy()
                loss4 = []
                for i in range(labels.size(0)):  # actual batch size (last batch may be smaller)
                    loss4.append(max(0, temp1[i][i] - temp2[i][i] + margin))
                loss4 = np.mean(loss4)
                loss4 = torch.tensor(loss4).cuda()
                loss3 = torch.mean(loss3).cuda()

                # Note: loss3 and loss4 are rebuilt from detached values (torch.tensor /
                # numpy), so they add to the reported loss but carry no gradients; only
                # loss1 and loss2 drive the parameter update.
                loss = loss1 + loss2 + loss3 + loss4
                #print(loss)

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                # Accumulate a Python float with .item() so the graph is not kept alive
                running_loss += loss.item()

                if batch_i % 45 == 44:  # print every 45 batches
                    avg_loss = running_loss / 45
                    # record and print the average loss over the last 45 batches
                    loss_over_time.append(avg_loss)
                    print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(
                        epoch + 1, batch_i + 1, avg_loss))
                    running_loss = 0.0
            if epoch % 5 == 0:  # check every 5 epochs
                if loss < least_loss:
                    # Keep the best checkpoint seen so far
                    torch.save(my_model.state_dict(), 'checkpoint.pt')
                    least_loss = loss

        print('Finished Training')
        return loss_over_time

    # pdb.set_trace()
    training_loss = train(n_epochs)

    # visualize the loss as the network trained
    '''fig = plt.figure()
    plt.plot(45 * np.arange(len(training_loss)), training_loss)
    plt.rc('xtick', labelsize=12)
    plt.rc('ytick', labelsize=12)
    plt.xlabel('Number of Batches', fontsize=12)
    plt.ylabel('loss', fontsize=12)
    plt.ylim(0, 5.5)  # consistent scale
    plt.tight_layout()
    if plot_path:
        plt.savefig(os.path.join(plot_path, "Loss_Over_Time"))
        print("saved")
    else:
        plt.show()
    plt.clf()'''

    # initialize tensor and lists to monitor test loss and accuracy
    if (device == "cuda"):
        test_loss = torch.zeros(1).cuda()
    else:
        test_loss = torch.zeros(1)
    class_correct = list(0. for i in range(len(classes)))
    class_total = list(0. for i in range(len(classes)))

    # Load the best checkpoint and switch the model to evaluation mode
    state = torch.load("checkpoint.pt")
    my_model.load_state_dict(state)
    my_model.eval()
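    # Note: the test loop below runs with autograd enabled; wrapping it in
    # `with torch.no_grad():` (a common idiom, not in the original code) would
    # avoid building graphs during evaluation.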

    for batch_i, data in enumerate(test_loader):
        dog_boxes = []
        dog_head_boxes = []

        original_images, labels, fileNames = data

        ##############to delete###########
        '''save_box_path = os.getcwd() + "/boxes/"
        print(save_box_path)
        try:
            os.mkdir(save_box_path)
        except OSError:
            print("Can't create folder")
        dog_box, dog_head_box = extract_object(original_images)
        # save to the file
        print("len: ", len(fileNames))
        for i in range(len(fileNames)):
            save_fileName = fileNames[i].split('.')[0]
            print("save_fileName: " + save_fileName)
            save_file_path = os.path.join(save_box_path, save_fileName + '.txt')
            with open(save_file_path, 'w') as f:
                for item in dog_box[i]:
                    f.write("%s " % item)
                f.write("\n")
                for item in dog_head_box[i]:
                    f.write("%s " % item)
            f.close()'''
        ############to delete##########

        #  original_images = original_images.cuda()
        #   original_images = original_images
        save_box_path = os.getcwd() + "/boxes/"
        for i in range(len(fileNames)):
            save_fileName = fileNames[i].split('.')[0]
            save_file_path = os.path.join(save_box_path,
                                          save_fileName + '.txt')
            with open(save_file_path) as f:
                line = f.readlines()
            dog = list(map(int, line[0].split(' ')[:-1]))
            dog_head = list(map(int, line[1].split(' ')[:-1]))
            dog_boxes.append(dog)
            dog_head_boxes.append(dog_head)
        dog_head_boxes = torch.tensor(dog_head_boxes)

        print("read test end")
        #################################  read boxes list from file  #################################

        inputs = []
        for i in range(len(dog_boxes)):
            # Crop the dog region given by dog_boxes[i] from the original image
            crop = original_images[i][
                :,
                int(dog_boxes[i][0]):int(dog_boxes[i][0] + dog_boxes[i][2]),
                int(dog_boxes[i][1]):int(dog_boxes[i][1] + dog_boxes[i][3])].clone()
            crop = torch.unsqueeze(crop, dim=0)
            # F.interpolate replaces the deprecated F.upsample
            crop = F.interpolate(crop, (224, 224),
                                 mode='bilinear',
                                 align_corners=False)
            crop = torch.squeeze(crop, dim=0)
            inputs.append(crop)
        inputs = torch.stack(inputs, dim=0)

        if (device == "cuda"):
            inputs, labels = inputs.cuda(), labels.cuda()

        # forward pass to get outputs
        bbox, pred1, pred2 = my_model(inputs)

        # calculate the loss
        loss1 = criterion(pred1, labels)

        loss2 = criterion(pred2, labels)

        loss3, _ = bbox_loss(bbox, dog_head_boxes)
        loss3 = torch.tensor(loss3)

        temp1 = pred1[:, labels]
        temp1 = temp1.cpu().detach().numpy()
        temp2 = pred2[:, labels]
        temp2 = temp2.cpu().detach().numpy()

        # Same margin term as in training; the values are detached, which is fine
        # here because no backward pass is taken during evaluation.
        loss4 = []
        for i in range(labels.size(0)):  # actual batch size (last batch may be smaller)
            loss4.append(max(0, temp1[i][i] - temp2[i][i] + margin))
        loss4 = torch.tensor(loss4)
        loss4 = torch.mean(loss4).cuda()

        loss3 = torch.mean(loss3).cuda()

        loss = loss1 + loss2 + loss3 + loss4

        if (device == "cuda"):
            test_loss = test_loss + ((torch.ones(1).cuda() / (batch_i + 1)) *
                                     (loss.data - test_loss))
        else:
            test_loss = test_loss + ((torch.ones(1) / (batch_i + 1)) *
                                     (loss.data - test_loss))

        _, result2 = torch.max(pred2.data, 1)
        correct = np.squeeze(result2.eq(labels.data.view_as(result2)))

        # calculate test accuracy for *each* object class
        # we get the scalar value of correct items for a class, by calling `correct[i].item()`
        for l, c in zip(labels.data, correct):
            class_correct[l] += c.item()
            class_total[l] += 1

    print('Test Loss: {:.6f}\n'.format(test_loss.cpu().numpy()[0]))

    for i in range(len(classes)):
        if class_total[i] > 0:
            print('Test Accuracy of %30s: %2d%% (%2d/%2d)' %
                  (classes[i], 100 * class_correct[i] / class_total[i],
                   np.sum(class_correct[i]), np.sum(class_total[i])))
        else:
            print('Test Accuracy of %5s: N/A (no training examples)' %
                  (classes[i]))

    print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' %
          (100. * np.sum(class_correct) / np.sum(class_total),
           np.sum(class_correct), np.sum(class_total)))
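A hedged invocation sketch for the function above (the hyperparameter values are illustrative, not taken from the original script):

if __name__ == '__main__':
    start(batch_size=16, n_epochs=30, learning_rate=0.001)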