Example #1
import torch

from conv_net import ConvNet  # assumed location of the ConvNet class used below

# optimizer, learning_rate, training_files, num_images, quot and labels are
# defined earlier in the original script (not shown in this excerpt)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       'min',
                                                       factor=0.9)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

for j in range(0, quot):
    input_tensor_training = []
    block_labels_training = []
    print(f'This is set {j + 1} of {quot} ({num_images} images per set)')
    if j > 0:
        # Rebuild the model and optimizer, then restore the previous checkpoint
        model = ConvNet()
        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        checkpoint = torch.load('/home/sharan/model_1.pth')
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        loss = checkpoint['loss']

    training_files_temp = training_files[j * num_images:(j + 1) * num_images]
    block_div_tensor(training_files_temp,
                     input_tensor_training, block_labels_training, 'train',
                     len(training_files_temp), labels)
    print('Blocks for training have been created')

    #labels[:] = []
    block_labels_tensor_train_encode = Label_Encode(block_labels_training)
    #input_tensor_stack_training = torch.zeros(len(input_tensor_training), 16, 64, 64)

    #for i in range(0, len(input_tensor_training)):
    #input_tensor_stack_training[i, :, :, :] = input_tensor_training[i]
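
The excerpt above restores '/home/sharan/model_1.pth' but never shows how that checkpoint is written, and the ReduceLROnPlateau scheduler it creates is never stepped. A minimal sketch of the matching calls, assuming a validation loss val_loss is computed at the end of each pass (the dict layout mirrors the keys loaded above; val_loss itself is an assumption):

# Hypothetical save/step calls matching the checkpoint format loaded above
torch.save({'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': loss},
           '/home/sharan/model_1.pth')
scheduler.step(val_loss)  # ReduceLROnPlateau expects the monitored metric
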
Example #2
import copy

import torch
import memtorch
from memtorch.mn.Module import patch_model
from memtorch.map.Parameter import naive_map
from memtorch.bh.crossbar.Program import naive_program

from conv_net import ConvNet
from model import Model

# Create new reference memristor
reference_memristor = memtorch.bh.memristor.VTEAM
reference_memristor_params = {"time_series_resolution": 1e-10}
memristor = reference_memristor(**reference_memristor_params)
memristor.plot_hysteresis_loop()

memristor_model = ConvNet()
memristor_model.load_state_dict(torch.load("model.ckpt"), strict=False)

patched_model = patch_model(copy.deepcopy(memristor_model),
                            memristor_model=reference_memristor,
                            memristor_model_params=reference_memristor_params,
                            module_parameters_to_patch=[torch.nn.Linear],
                            mapping_routine=naive_map,
                            transistor=True,
                            programming_routine=None,
                            tile_shape=(128, 128),
                            max_input_voltage=1.0,
                            ADC_resolution=8,
                            ADC_overflow_rate=0.,
                            quant_method='linear')
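
A hedged follow-up, assuming the usual MemTorch workflow: patched layers are typically tuned once before the model is evaluated (whether the original script does this is not shown in the excerpt):

# Assumed MemTorch workflow: calibrate the patched crossbar layers, then evaluate
patched_model.tune_()
patched_model.eval()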

print("Hello world")
# test_loader = torch.utils.data.DataLoader(
#     datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
#             transforms.ToTensor(),
#             ])),
#         batch_size=1, shuffle=False, sampler=torch.utils.data.SubsetRandomSampler(list(
#                                               range(100))))

# Define what device we are using
print("CUDA Available: ", torch.cuda.is_available())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

pretrained_model = "models/cnn_mnist.ckpt"
model = ConvNet().to(device)
# pretrained_model = "models/lenet_mnist_model.pth"
# model = Net().to(device)
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))
model.eval()

# load_combined_model is assumed to be defined or imported elsewhere in this project
gp_model, likelihood = load_combined_model('models/gp_mnist.dat')
gp_model.eval()
likelihood.eval()


# FGSM attack code
def fgsm_attack(image, epsilon, data_grad):
    # Collect the element-wise sign of the data gradient
    sign_data_grad = data_grad.sign()
    # Create the perturbed image by adjusting each pixel of the input image
    perturbed_image = image + epsilon * sign_data_grad
    # Adding clipping to maintain [0,1] range
    perturbed_image = torch.clamp(perturbed_image, 0, 1)
    # Return the perturbed image
    return perturbed_image
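
A minimal usage sketch for the attack above, assuming a test_loader that yields (image, label) batches and an illustrative epsilon; neither name comes from the original snippet, and the loss should be adjusted to match the model's output format:

# Hypothetical evaluation loop applying fgsm_attack to the pretrained ConvNet
epsilon = 0.1  # assumed perturbation budget
for image, label in test_loader:
    image, label = image.to(device), label.to(device)
    image.requires_grad = True
    output = model(image)
    loss = torch.nn.functional.cross_entropy(output, label)  # assumes raw logits
    model.zero_grad()
    loss.backward()
    perturbed = fgsm_attack(image, epsilon, image.grad.data)
    adv_output = model(perturbed)  # re-classify the perturbed image
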
Example #4
import os

import cv2
import torch

from conv_net import ConvNet  # assumed location of the ConvNet class used below
from detect_balls import find_circles
from find_table_corners import table_corners
from find_best_shot import *
from project_board import *

test_number = 3
ckpt_epoch = 9
player = "solid"

data_dir = "."
model_weights = "epoch_%d.pt" % ckpt_epoch
use_cuda = torch.cuda.is_available()
device = torch.device("cpu")  # inference is forced onto the CPU here
model = ConvNet().to(device)
model.eval()
model.load_state_dict(
    torch.load(model_weights, map_location=lambda storage, loc: storage))

cap = cv2.VideoCapture(os.path.join(data_dir,
                                    "full_test_%d.mp4" % test_number))

frame_num = 0
while cap.isOpened():
    frame_num += 1

    ret, frame = cap.read()
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    h, w = frame.shape[0], frame.shape[1]

    # create modified image