def run_demo():
    """Discover Meccano modules on GPIO pin 4 and exercise the first two.

    Cycles module 0 through a list of servo positions while module 1 steps
    through red/green/blue colors, one command per second, for 30 commands.
    Exits the process if discovery does not find the modules.
    """
    logger = Logger(debug=False)
    logger.info("Meccano Module Demo")
    port = SerialPort(logger, pin=4, bit_compensation=77)
    md = ModuleDiscovery(logger, port)
    md.discover_modules_in_channel()
    # FIX: also verify modules[1] -- the loop below drives both modules, but
    # the original only checked modules[0] and would crash on a missing m2.
    if md.modules[0] is None or md.modules[1] is None:
        print("FAILED TO DISCOVER MECCA SERVOS")
        # FIX: exit() is injected by the site module for interactive use and
        # is not guaranteed in scripts; raising SystemExit works everywhere
        # and exits with the same status code (0).
        raise SystemExit
    logger.info("Found modules. Lets exercise them")
    #
    # Run the 2 servos through some tasks
    commands = [
        (50, (1, 0, 0)),
        (75, (0, 1, 0)),
        (100, (0, 0, 1)),
        (125, (1, 0, 0)),
        (150, (0, 1, 0)),
        (175, (0, 0, 1)),
    ]
    m1 = md.modules[0]
    m2 = md.modules[1]
    for i in range(30):
        idx = i % len(commands)
        p = commands[idx][0]
        r, g, b = commands[idx][1]
        m1.set_position(p)   # assumes modules[0] is a servo -- TODO confirm
        m2.set_color(r, g, b)  # assumes modules[1] is an LED -- TODO confirm
        time.sleep(1)
def setUp(self):
    """Create the serial-port fixture used by the tests (GPIO pin 4)."""
    test_logger = Logger(False)
    pin = 4
    self.port = SerialPort(test_logger, pin=pin, bit_compensation=10)
def main():
    """Self-supervised pretraining entry point.

    Builds train/validation loaders for ``args.domain``, trains ``Network``
    with SGD + cross-entropy, evaluates every 10 epochs (checkpointing the
    best model), snapshots every 1000 steps, then copies the shared weights
    into a fresh 65-class model for classifier finetuning.  Relies on the
    module-level ``args``, ``save_root`` and helper functions.
    """
    save_dir = join(save_root, args.save_dir)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    if args.gpu is not None:
        print(('Using GPU %d' % args.gpu))
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    else:
        print('CPU mode')
    print('Process number: %d' % (os.getpid()))

    ## DataLoader initialize ILSVRC2012_train_processed
    trainpath = join(args.data_path, args.domain)
    train_data = DataLoader(trainpath, split='train', classes=args.classes,
                            ssl=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                               batch_size=args.batch,
                                               shuffle=True,
                                               num_workers=args.cores)
    valpath = join(args.data_path, args.domain)
    val_data = DataLoader(valpath, split='validation', classes=args.classes)
    val_loader = torch.utils.data.DataLoader(dataset=val_data,
                                             batch_size=args.batch,
                                             shuffle=True,
                                             num_workers=args.cores)
    iter_per_epoch = train_data.N / args.batch
    print('Images: train %d, validation %d' % (train_data.N, val_data.N))

    # Network initialize
    net = Network(args.classes)
    if args.gpu is not None:
        net.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9,
                                weight_decay=5e-4)

    logger = Logger(join(save_root, args.save_dir, 'train'))
    logger_test = Logger(join(save_root, args.save_dir, 'test'))

    ############## TESTING ###############
    if args.evaluate:
        test(net, criterion, None, val_loader, 0)
        return

    ############## TRAINING ###############
    print(('Start training: lr %f, batch size %d, classes %d' %
           (args.lr, args.batch, args.classes)))
    print(('Checkpoint: ' + args.save_dir))

    # Train the Model
    batch_time, net_time = [], []  # rolling 100-sample timing windows
    steps = args.iter_start
    best_acc = -1
    for epoch in range(args.epochs):
        # Evaluate every 10 epochs (skipping epoch 0).
        if epoch % 10 == 0 and epoch > 0:
            net, acc = test(net, criterion, logger_test, val_loader, steps)
            if (best_acc < acc):
                # FIX: best_acc was never updated, so every evaluation after
                # the first overwrote best_model.pth regardless of quality.
                best_acc = acc
                net.save(join(save_dir, 'best_model.pth'))
        lr = adjust_learning_rate(optimizer, epoch, init_lr=args.lr, step=20,
                                  decay=0.1)

        end = time()
        for i, (images, labels, original) in enumerate(train_loader):
            batch_time.append(time() - end)
            if len(batch_time) > 100:
                del batch_time[0]
            images = Variable(images)
            labels = Variable(labels)
            if args.gpu is not None:
                images = images.cuda()
                labels = labels.cuda()

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            t = time()
            outputs = net(images)
            net_time.append(time() - t)
            if len(net_time) > 100:
                del net_time[0]

            prec1, prec5 = compute_accuracy(outputs.cpu().data,
                                            labels.cpu().data, topk=(1, 5))
            # acc = prec1[0]
            acc = prec1

            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            loss = float(loss.cpu().data.numpy())
            steps += 1

            if steps % 1000 == 0:
                filename = join(save_dir, ('%06d.pth.tar' % (steps)))
                net.save(filename)
                print('Saved: ' + args.save_dir)
            end = time()

    ###########################################################################
    # classifier finetune                                                     #
    ###########################################################################
    finetune_model = Network(65)
    pretrained_dict = {
        k: v
        for k, v in net.state_dict().items()
        if k in finetune_model.state_dict()
    }
    # FIX: state_dict() returns a fresh copy each call, so updating that copy
    # never loaded the pretrained weights into the model; load_state_dict
    # with strict=False actually applies the partial dict.
    finetune_model.load_state_dict(pretrained_dict, strict=False)
# Fragment: tail of a module-discovery method whose start is above this
# chunk -- indentation below is reconstructed; confirm against the full file.
            # LED module found on the chain: create its wrapper, give it a
            # visible startup color, and register it by module id.
            if resp == MODULE_TYPE_LED:
                module = LightModule(self.protocol, module_id)
                module.set_color(0, 7, 0, 7)  # Set to Green initially
                self.modules[module_id] = module
                found = True
                counter += 1
        # Report whether at least one module answered on this channel.
        if not found:
            return False
        return True


if __name__ == "__main__":
    from Serial.SerialPort import SerialPort
    from Utils.logger import Logger

    # Manual smoke test: discover modules on GPIO pin 4 and poke the first
    # two (presumably a servo and an LED -- confirm against hardware setup).
    logger = Logger(True)
    logger.info("Meccano Module")
    port = SerialPort(logger, pin=4, bit_compensation=77)
    md = ModuleDiscovery(logger, port)
    md.discover_modules_in_channel()
    m1 = md.modules[0]
    m1.set_position(50)
    m2 = md.modules[1]
    m2.set_color(1, 0, 0)
# Fragment: closing arguments of an IVParams(...) call opened above this chunk.
                          current_compliance=100e-3)  # Amps

# 30 sec
REVERSE_PARAMS = IVParams(max_voltage=-1100,  # Volts
                          step_voltage=10,  # Volts
                          current_compliance=100e-6)  # Amps

# Presumably re-runs the IV sweep before each test when True -- confirm.
do_IV_each_time = True
SCOPE_TIMEOUT = 5  # Secs

# ##### #
# Setup #
# ##### #

logger = Logger(logger_id="Carac", time_format="%d/%m/%y %H:%M:%S",
                path=pathlib.Path('./log'), default_save=True)

# Records: a diode under test (board, name) and a named measurement
# (name, amp, temp).
Diode = namedtuple("Diode", ['board', 'name'])
TestData = namedtuple("TestData", ['name', 'amp', 'temp'])

# Build the diode list from DIODE_LIST and create the per-diode output
# directories for the surge / direct / reverse measurement files.
DIODES = []
for diode in DIODE_LIST:
    DIODES.append(Diode(*diode))
    diode_dirpath = PATH / DIODES[-1].name
    (diode_dirpath / SURGE_NAMES.dirname).mkdir(parents=True, exist_ok=True)
    (diode_dirpath / DIRECT_NAMES.dirname).mkdir(parents=True, exist_ok=True)
    (diode_dirpath / REVERSE_NAMES.dirname).mkdir(parents=True, exist_ok=True)

# ######## #
# Main App #
# Sleep Actual # 1000000 1,001,150 # 100000 100,270 # 10000 10,250 # 1000 1,150 # 1 mSec # 500 610 # # 400 513 # 300 420 # 200 315 # 100 220 # 50 165 # 10 120 # 0 45 # None 35 (17) if __name__ == "__main__": logger = Logger(debug=False) logger.info("Serial Port") port = SerialPort(logger, pin=4, bit_compensation=77) # port.send_one_byte(0xA5) # port.send_many_bytes([0xAA,0x55,0xAA,0x55]) logger.info("Send init sequence to get some actual data") port.send_many_bytes([0xFF, 0xFE, 0xFE, 0xFE, 0xFE, 0xa0]) byte = port.receive_one_byte() logger.info("byte: {}".format(hex(byte))) # for i in range(10): # byte=port.receive_one_byte() # logger.info("Got one byte: {}".format(byte))
def main():
    """Finetune a pretrained network on the labelled domain data.

    Loads pretrained weights via ``load_pretrained_weights``, trains with
    SGD + cross-entropy, evaluates every 10 epochs, logs every 20 steps,
    and checkpoints every 1000 steps.  Relies on module-level ``args``,
    ``save_root`` and helper functions.
    """
    if args.gpu is not None:
        print(('Using GPU %d' % args.gpu))
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    else:
        print('CPU mode')
    print('Process number: %d' % (os.getpid()))

    model = load_pretrained_weights(args)
    model.train(True)
    if args.gpu is not None:
        model.cuda()

    trainpath = join(args.data_path, args.domain)
    train_data = DataLoader(trainpath, split='train', classes=args.classes,
                            ssl=False)
    train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                               batch_size=args.batch,
                                               shuffle=True,
                                               num_workers=args.cores)
    valpath = join(args.data_path, args.domain)
    # NOTE(review): validation also reads split='train' -- looks like it
    # should be a held-out split; confirm this is intentional.
    val_data = DataLoader(valpath, split='train', classes=args.classes,
                          ssl=False)
    val_loader = torch.utils.data.DataLoader(dataset=val_data,
                                             batch_size=args.batch,
                                             shuffle=True,
                                             num_workers=args.cores)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9,
                                weight_decay=5e-4)

    logger = Logger(join(save_root, args.save_dir, 'train'))
    logger_test = Logger(join(save_root, args.save_dir, 'test'))

    batch_time, net_time = [], []  # rolling 100-sample timing windows
    steps = args.iter_start
    for epoch in range(args.epochs):
        # Evaluate every 10 epochs (skipping epoch 0).
        if epoch % 10 == 0 and epoch > 0:
            model = test(model, criterion, logger_test, val_loader, steps)
        lr = adjust_learning_rate(optimizer, epoch, init_lr=args.lr, step=20,
                                  decay=0.1)

        end = time()
        for i, (images, labels) in enumerate(train_loader):
            # print(i, images.shape, labels)
            batch_time.append(time() - end)
            if len(batch_time) > 100:
                del batch_time[0]
            images = Variable(images)
            labels = Variable(labels)
            if args.gpu is not None:
                images = images.cuda()
                labels = labels.cuda()

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            t = time()
            outputs = model(images)
            net_time.append(time() - t)
            if len(net_time) > 100:
                del net_time[0]

            prec1 = compute_accuracy(outputs.cpu().data, labels.cpu().data,
                                     topk=(1,))
            acc = prec1[0]
            # acc = prec1

            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            loss = float(loss.cpu().data.numpy())

            if steps % 20 == 0:
                print((
                    '[%2d/%2d] %5d) [batch load % 2.3fsec, net %1.2fsec], LR %.5f, Loss: % 1.3f, Accuracy % 2.2f%%' % (
                        epoch + 1, args.epochs, steps, np.mean(batch_time),
                        np.mean(net_time), lr, loss, acc)))
            steps += 1

            if steps % 1000 == 0:
                filename = 'jps_%03i_%06d.pth.tar' % (epoch, steps)
                filename = join(save_root, args.save_dir, filename)
                model.save(filename)
                print('Saved: ' + args.save_dir)

            end = time()
def main():
    """Run the three-stage domain-adaptation pipeline.

    Stage 1: self-supervised training on the target domain.
    Stage 2: supervised finetuning on the source domain (FC layers and the
             source domain's BN bank only; everything else frozen).
    Stage 3: evaluation on the target domain + confusion-matrix dump.
    With ``--proceed``, each completed stage feeds the next one.
    """
    args = parse_args()
    trg_num = domain_dict[args.trg_domain]
    src_num = domain_dict[args.src_domain]
    if args.gpu is not None:
        print(('Using GPU %d' % args.gpu))
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    else:
        print('CPU mode')
    print('Process number: %d' % (os.getpid()))

    stage = args.stage
    if (stage == 1):
        model = Network(args.classes[stage - 1])
        model.train(True)
        if args.gpu is not None:
            model.cuda()
        save_dir = join(save_root, args.save_dir, 'stage1')
        os.makedirs(save_dir, exist_ok=True)
        print('save dir: ', save_dir)
        trainpath = join(args.data_path, args.trg_domain)
        train_data = DataLoader(trainpath, split='train',
                                classes=args.classes[stage - 1], ssl=True)
        valpath = join(args.data_path, args.trg_domain)
        val_data = DataLoader(valpath, split='train',
                              classes=args.classes[stage - 1], ssl=True)
        ssl_train(args=args, model=model, train_data=train_data,
                  val_data=val_data, save_dir=save_dir, domain_num=trg_num)
        if (args.proceed):
            stage += 1

    if (stage == 2):
        save_dir = join(save_root, args.save_dir, 'stage2')
        os.makedirs(save_dir, exist_ok=True)
        print('save dir: ', save_dir)
        model = Network(args.classes[stage - 1])
        if (args.proceed):
            model_path = join(save_dir.replace('stage2', 'stage1'),
                              'best_model.pth')
        else:
            model_path = args.model_path
        # Load the stage-1 weights, dropping the classifier head.
        pre = torch.load(model_path)
        new_pre = OrderedDict()
        for name in pre:
            if ("classifier" in name):
                continue
            else:
                new_pre[name] = pre[name]
        model.load_state_dict(new_pre, strict=False)

        # Freeze everything except the FC layers and this domain's BN bank.
        bn_name = 'bns.' + str(src_num)  # FIX: was C-style cast (str)(src_num)
        for name, p in model.named_parameters():
            if ('fc' in name) or (bn_name in name):
                p.requires_grad = True
                # print(name)
                continue
            else:
                p.requires_grad = False
        # FIX: removed a stray `return` here that aborted main() before the
        # re-initialization and the whole stage-2 training below ever ran.
        torch.nn.init.xavier_uniform_(model.fc6.fc6_s1.weight)
        torch.nn.init.xavier_uniform_(model.fc7.fc7.weight)
        torch.nn.init.xavier_uniform_(model.classifier.fc8.weight)
        model.train(True)
        if args.gpu is not None:
            model.cuda()
        trainpath = join(args.data_path, args.src_domain)
        train_data = DataLoader(trainpath, split='train',
                                classes=args.classes[stage - 1], ssl=False)
        valpath = join(args.data_path, args.src_domain)
        val_data = DataLoader(valpath, split='train',
                              classes=args.classes[stage - 1], ssl=False)
        train(args=args, model=model, train_data=train_data,
              val_data=val_data, save_dir=save_dir,
              domain_num=domain_dict[args.src_domain])
        if (args.proceed):
            stage += 1

    if stage == 3:
        model = Network(args.classes[stage - 2])
        if args.proceed:
            model_path = join(save_root, args.save_dir, 'stage2',
                              'best_model.pth')
        else:
            model_path = args.model_path
            # FIX: save_dir was undefined when stage 3 ran standalone,
            # crashing the Logger/np.save calls below with a NameError.
            save_dir = join(save_root, args.save_dir, 'stage3')
            os.makedirs(save_dir, exist_ok=True)
        model.load_state_dict(torch.load(model_path))
        if args.gpu is not None:
            model.cuda()
        model.eval()
        logger = Logger(join(save_dir, 'test'))
        valpath = join(args.data_path, args.trg_domain)
        val_data = DataLoader(valpath, split='train',
                              classes=args.classes[stage - 2], ssl=False)
        val_loader = torch.utils.data.DataLoader(dataset=val_data,
                                                 batch_size=args.batch,
                                                 shuffle=True,
                                                 num_workers=args.cores)
        y_s = []
        pred_s = []
        accuracy = []
        for i, (x, y) in enumerate(val_loader):
            x = Variable(x)
            if args.gpu is not None:
                x = x.cuda()
            # Forward + Backward + Optimize
            # FIX: `domain_num` was undefined in this scope (NameError);
            # evaluation runs on the target domain, so trg_num is used --
            # confirm this matches the intended domain index.
            outputs = model(
                x, trg_num * torch.ones(x.shape[0], dtype=torch.long).cuda())
            outputs = outputs.cpu().data
            y_s += y
            pred_s += outputs.argmax(axis=1)
            prec1, _ = compute_accuracy(outputs, y, topk=(1, 5))
            accuracy.append(prec1.item())

        c_mat = confusion_matrix(y_true=y_s, y_pred=pred_s)
        np.save(join(save_dir, 'stage3_c_mat.npy'), c_mat)
        print('confusion matrix saved at: ',
              join(save_dir, 'stage3_c_mat.npy'))
        # FIX: message previously read 'TESTING: ), ...' (garbled format
        # string, apparently a dropped step counter).
        print('TESTING: Accuracy %.2f%%' % (np.mean(accuracy)))
def train(args, model, train_data, val_data, save_dir, domain_num):
    """Supervised finetuning loop (stage 2).

    Trains only the parameters the caller left with requires_grad=True,
    evaluates every 10 epochs, checkpoints every 1000 steps, tracks a best
    model, and runs one final evaluation after the last epoch.
    """
    train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                               batch_size=args.batch,
                                               shuffle=True,
                                               num_workers=args.cores)
    val_loader = torch.utils.data.DataLoader(dataset=val_data,
                                             batch_size=args.batch,
                                             shuffle=True,
                                             num_workers=args.cores)
    criterion = nn.CrossEntropyLoss()
    # optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    # Only optimize the parameters left trainable by the caller.
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                       model.parameters()),
                                lr=args.lr, momentum=0.9, weight_decay=5e-4)

    logger = Logger(join(save_dir, 'train'))
    logger_test = Logger(join(save_dir, 'test'))

    batch_time, net_time = [], []  # rolling 100-sample timing windows
    steps = args.iter_start
    best_acc = -1
    for epoch in range(args.epochs):
        # Evaluate every 10 epochs (skipping epoch 0).
        if epoch % 10 == 0 and epoch > 0:
            model, acc = test(args, model, logger_test, val_loader, steps,
                              domain_num, save_dir)
            if (best_acc < acc):
                best_acc = acc
                filename = join(save_dir, 'best_model.pth')
                model.save(filename)
        lr = adjust_learning_rate(optimizer, epoch, init_lr=args.lr, step=20,
                                  decay=0.1)

        end = time()
        for i, (x, y) in enumerate(train_loader):
            # print(i, images.shape, labels)
            batch_time.append(time() - end)
            if len(batch_time) > 100:
                del batch_time[0]
            x = Variable(x)
            y = Variable(y)
            if args.gpu is not None:
                x = x.cuda()
                y = y.cuda()

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            t = time()
            # NOTE(review): .cuda() on the domain tensor is unconditional and
            # will fail in CPU mode -- confirm GPU-only operation is intended.
            outputs = model(
                x,
                domain_num * torch.ones(x.shape[0], dtype=torch.long).cuda())
            net_time.append(time() - t)
            if len(net_time) > 100:
                del net_time[0]

            prec1 = compute_accuracy(outputs.cpu().data, y.cpu().data,
                                     topk=(1, ))
            acc = prec1[0]

            loss = criterion(outputs, y)
            loss.backward()
            optimizer.step()
            steps += 1

            # NOTE(review): this compares against the best *batch* training
            # accuracy and overwrites best_model.pth mid-epoch -- it competes
            # with the validation-based save above; confirm this is intended.
            if (best_acc < acc):
                best_acc = acc
                filename = join(save_dir, 'best_model.pth')
                model.save(filename)
            if steps % 1000 == 0:
                filename = 'step_%06d.pth.tar' % (steps)
                filename = join(save_dir, filename)
                model.save(filename)
                print('steps: %d' % (steps))
            end = time()

    # Final evaluation; steps=-1 marks the post-training run.
    steps = -1
    model, acc = test(args, model, logger_test, val_loader, steps,
                      domain_num, save_dir)
    print('final acc: %0.3f' % (acc))
    return
def ssl_train(args, model, train_data, val_data, save_dir, domain_num):
    """Self-supervised training loop (stage 1).

    Trains the trainable parameters with SGD + cross-entropy, evaluates
    every 10 epochs (keeping the best model), and checkpoints every 1000
    steps.  ``domain_num`` selects the domain index passed to the model
    alongside each batch.
    """
    logger_test = Logger(save_dir + '/test')
    train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                               batch_size=args.batch,
                                               shuffle=True,
                                               num_workers=args.cores)
    val_loader = torch.utils.data.DataLoader(dataset=val_data,
                                             batch_size=args.batch,
                                             shuffle=True,
                                             num_workers=args.cores)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                       model.parameters()),
                                lr=args.lr, momentum=0.9, weight_decay=5e-4)

    iter_per_epoch = train_data.N / args.batch
    print('Images: train %d, validation %d' % (train_data.N, val_data.N))
    print(('Start training: lr %f, batch size %d, classes %d' %
           (args.lr, args.batch, args.classes[0])))

    batch_time, net_time = [], []  # rolling 100-sample timing windows
    steps = args.iter_start
    best_acc = -1
    # Resume epoch counting from the iteration the caller started at.
    for epoch in range(int(args.iter_start / iter_per_epoch), args.epochs):
        # Evaluate every 10 epochs (skipping epoch 0).
        if epoch % 10 == 0 and epoch > 0:
            print('Evaluating network.......')
            accuracy = []
            model.eval()
            for i, (x, y, _) in enumerate(val_loader):
                x = Variable(x)
                if args.gpu is not None:
                    x = x.cuda()
                domains = domain_num * torch.ones(x.shape[0],
                                                  dtype=torch.long)
                if args.gpu is not None:
                    # FIX: .cuda() was unconditional and crashed in CPU mode.
                    domains = domains.cuda()
                outputs = model(x, domains)
                outputs = outputs.cpu().data
                prec1, _ = compute_accuracy(outputs, y, topk=(1, 5))
                accuracy.append(prec1.item())

            acc = np.mean(accuracy)
            if (best_acc < acc):
                best_acc = acc
                filename = join(save_dir, 'best_model.pth')
                model.save(filename)
            if logger_test is not None:
                logger_test.scalar_summary('accuracy', acc, steps)
            print('TESTING: %d), Accuracy %.2f%%' % (steps, acc))
            model.train()

        lr = adjust_learning_rate(optimizer, epoch, init_lr=args.lr, step=20,
                                  decay=0.1)

        end = time()
        for i, (x, y, original) in enumerate(train_loader):
            batch_time.append(time() - end)
            if len(batch_time) > 100:
                del batch_time[0]
            # FIX: dropped the dead `images = Variable(x)` and
            # `labels = Variable(y)` locals -- the loop only ever used x, y.
            if args.gpu is not None:
                x = x.cuda()
                y = y.cuda()

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            t = time()
            domains = domain_num * torch.ones(x.shape[0], dtype=torch.long)
            if args.gpu is not None:
                # FIX: .cuda() was unconditional and crashed in CPU mode.
                domains = domains.cuda()
            outputs = model(x, domains)
            net_time.append(time() - t)
            if len(net_time) > 100:
                del net_time[0]

            prec1, prec5 = compute_accuracy(outputs.cpu().data, y.cpu().data,
                                            topk=(1, 5))
            # acc = prec1[0]
            acc = prec1

            loss = criterion(outputs, y)
            loss.backward()
            optimizer.step()
            loss = float(loss.cpu().data.numpy())
            steps += 1

            if steps % 1000 == 0:
                filename = 'step_%06d.pth.tar' % (steps)
                filename = join(save_dir, filename)
                model.save(filename)
                print('model saved at: ', filename)
            end = time()
# Fragment: tail of a send/receive method whose start is above this chunk --
# indentation below is reconstructed; confirm against the full file.
        self.__send_data_to_module_chain(module_id)
        resp = self.__get_data_from_chain()
        sleep(0.10)  # brief pause before the next exchange on the chain
        return resp

    def __send_data_to_module_chain(self, module_id):
        """Frame the pending payload (HEADER + self.data + checksum) and
        transmit it over the serial port."""
        data = [HEADER] + self.data + [
            self.__calculate_checksum(module_id),
        ]
        self.port.send_many_bytes(data)

    def __get_data_from_chain(self):
        """Read one byte from the chain; a -1 read error is mapped to the
        MODULE_NOT_RESPONDING sentinel."""
        input_byte = self.port.receive_one_byte()
        if input_byte == -1:
            input_byte = MODULE_NOT_RESPONDING
        return input_byte

    def __calculate_checksum(self, module_id):
        """Compute the Meccano-protocol checksum byte.

        Sums the four payload bytes, folds the carry back in (>> 8) and
        mixes in a shifted copy (<< 4), keeps only the high nibble, then
        ORs the module id into the low nibble.
        """
        checksum = self.data[0] + self.data[1] + self.data[2] + self.data[3]
        checksum += (checksum >> 8)
        checksum += (checksum << 4)
        checksum &= 0xF0
        checksum |= module_id
        return checksum


if __name__ == "__main__":
    from Utils.logger import Logger

    # Minimal import/log smoke test.
    logger = Logger(False)
    logger.info("MeccanoModule")