Example #1
def generate_ctx(ctx_config):
    """Build a list of MXNet Contexts from a configuration list.

    Each entry of ctx_config is a dict with a 'device_name' key
    ('cpu' or 'gpu') and, for GPUs, a 'device_id'.
    """
    ctx_list = []

    # no configuration given: default to a single GPU context
    if ctx_config is None:
        ctx_list.append(Context(device_type='gpu', device_id=0))
        return ctx_list

    for ctx in ctx_config:
        name = ctx['device_name']
        if name == 'cpu':
            # CPU takes precedence: return a single CPU context immediately
            ctx_list.append(Context(device_type=name, device_id=0))
            return ctx_list
        else:
            device_id = int(ctx['device_id'])
            ctx_list.append(Context(device_type='gpu', device_id=device_id))
    return ctx_list
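A minimal usage sketch (assuming `Context` here is `mxnet.Context`; the configuration dicts are illustrative, not from the original source):

from mxnet import Context

# hypothetical configuration listing two GPUs; passing None instead
# would yield the default single-GPU context
ctx_config = [
    {'device_name': 'gpu', 'device_id': 0},
    {'device_name': 'gpu', 'device_id': 1},
]

ctx_list = generate_ctx(ctx_config)
print(ctx_list)  # [gpu(0), gpu(1)]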
Example #2
def run_experiment(runname, batch_size, on_amazon=False, load_model=False, old_runname=None, start_epoch=None):
    # -----------------------------------------------------------------------------------------------------------
    if on_amazon:
        # AWS Sagemaker:
        import boto3
        region = boto3.Session().region_name
        bucket = boto3.Session().resource('s3').Bucket('sagemaker-inf')
        MODEL_DIR = '/dev/shm/models'

    device       = Device.GPU2
    epochs       = 50
    features     = 64
    all_image_size = 96
    in_chan = 18
        
    context = cpu() if device.value == -1 else gpu(device.value)
    # ----------------------------------------------------
    if load_model:
        summaryWriter = SummaryWriter('logs/'+old_runname, flush_secs=5)
    else:
        summaryWriter = SummaryWriter('logs/'+runname, flush_secs=5)

    train_iter = modules.make_video_iterator('training','V1','V2', 'V3', batch_size=batch_size, shuffle=True)
    test_iter = modules.make_video_iterator('testing', 'V1','V2', 'V3', batch_size=batch_size, shuffle=True)

    RFlocs_V1_overlapped_avg = modules.get_RFs('V1', context)
    RFlocs_V2_overlapped_avg = modules.get_RFs('V2', context)
    RFlocs_V3_overlapped_avg = modules.get_RFs('V3', context)

    with Context(context):
        discriminator = Discriminator(in_chan)
        generator = Generator(in_chan, context)
        
        if load_model:
            if on_amazon:
                generator.network.load_parameters(f'{MODEL_DIR}/saved_models/{old_runname}/netG_{start_epoch}.model', ctx=context)
                discriminator.network.load_parameters(f'{MODEL_DIR}/saved_models/{old_runname}/netD_{start_epoch}.model')

                
            else:
                generator.network.load_parameters(f'saved_models/{old_runname}/netG_{start_epoch}.model', ctx=context) 
                discriminator.network.load_parameters(f'saved_models/{old_runname}/netD_{start_epoch}.model')

        
        d = discriminator.network

        g = generator.network

        print( 'train_dataset_length:', len(train_iter._dataset))

        for epoch in range(epochs):

            loss_discriminator_train = []
            loss_generator_train = []

            # ====================
            # T R A I N I N G
            # ====================

            for RFsignalsV1,RFsignalsV2,RFsignalsV3, targets, recon in tqdm(train_iter, total = len(train_iter)):
                # -------
                # Inputs
                # -------
                inputs1 = modules.get_inputsROI(RFsignalsV1, RFlocs_V1_overlapped_avg, context)
                inputs2 = modules.get_inputsROI(RFsignalsV2, RFlocs_V2_overlapped_avg, context)
                inputs3 = modules.get_inputsROI(RFsignalsV3, RFlocs_V3_overlapped_avg, context)

                inputs = concat(inputs1, inputs2, inputs3, recon, dim=1)
                # ------------------------------------
                # T R A I N  D i s c r i m i n a t o r
                # ------------------------------------
                # reorder to channels-first and collapse frame/channel dims into 18 channels
                targets = targets.transpose((0, 1, 4, 2, 3)).reshape((-1, 18, 96, 96))

                loss_discriminator_train.append(discriminator.train(g, inputs, targets))

                # ----------------------------
                # T R A I N  G e n e r a t o r
                # ----------------------------
                loss_generator_train.append(generator.train(d, inputs, targets))

            # ====================
            # T E S T I N G
            # ====================
            loss_discriminator_test = []
            loss_generator_test = []

            for  RFsignalsV1,RFsignalsV2,RFsignalsV3, targets, recon in test_iter:
                # -------
                # Inputs
                # -------
                inputs1 = modules.get_inputsROI(RFsignalsV1, RFlocs_V1_overlapped_avg, context)
                inputs2 = modules.get_inputsROI(RFsignalsV2, RFlocs_V2_overlapped_avg, context)
                inputs3 = modules.get_inputsROI(RFsignalsV3, RFlocs_V3_overlapped_avg, context)
                inputs = concat(inputs1, inputs2, inputs3, recon, dim=1)

                # -----
                # Targets
                # -----            
                targets = targets.transpose((0,1,4,2,3)).reshape((-1,18, 96,96))

                # ----
                # generator prediction for this batch
                # ----
                y_hat = g(inputs)

                z = concat(inputs, y_hat, dim=1)

                # conditional discriminator loss: fake pairs labelled 0, real pairs labelled 1
                dis_loss_test = 0.5 * (discriminator.lossfun(0, d(z)) + discriminator.lossfun(1, d(concat(inputs, targets, dim=1))))

                loss_discriminator_test.append(float(dis_loss_test.asscalar()))

                gen_loss_test = generator.lossfun(1, d(concat(inputs, y_hat, dim=1)), targets.reshape((-1,3,96,96)), y_hat.reshape((-1,3,96,96)))

                loss_generator_test.append(float(gen_loss_test.asscalar()))

            os.makedirs('saved_models/'+runname, exist_ok=True)
            generator.network.save_parameters(f'saved_models/{runname}/netG_{epoch}.model')
            discriminator.network.save_parameters(f'saved_models/{runname}/netD_{epoch}.model')            

            # ------------------------------------------------------------------
            # T R A I N I N G Losses
            # ------------------------------------------------------------------
            np.save(f'saved_models/{runname}/Gloss_train{epoch}', np.array(loss_generator_train))
            np.save(f'saved_models/{runname}/Dloss_train{epoch}', np.array(loss_discriminator_train))
            # ------------------------------------------------------------------
            # T E S T I N G Losses
            # ------------------------------------------------------------------
            np.save(f'saved_models/{runname}/Gloss_test{epoch}', np.array(loss_generator_test))
            np.save(f'saved_models/{runname}/Dloss_test{epoch}', np.array(loss_discriminator_test))            
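A hedged invocation sketch for this entry point; the run names, batch size, and epoch number are illustrative, and resuming assumes checkpoints saved by an earlier run:

# fresh run
run_experiment('video_gan_baseline', batch_size=4)

# resume from epoch 30 of a previous run; assumes
# saved_models/video_gan_baseline/netG_30.model and netD_30.model exist
run_experiment('video_gan_resume', batch_size=4,
               load_model=True,
               old_runname='video_gan_baseline',
               start_epoch=30)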
Example #3
def run_experiment(fraction_train,
                   load_model=False,
                   old_runname=None,
                   start_epoch=None):

    runname = f'splitted_data_{str(fraction_train)}'

    device = Device.GPU1
    epochs = 50
    features = 64
    batch_size = 4
    all_image_size = 96
    in_chan = 15

    context = cpu() if device.value == -1 else gpu(device.value)
    # ----------------------------------------------------
    if load_model:
        summaryWriter = SummaryWriter('logs/' + old_runname, flush_secs=5)
    else:
        summaryWriter = SummaryWriter('logs/' + runname, flush_secs=5)

    train_iter = modules.make_iterator_preprocessed(
        'training',
        'V1',
        'V2',
        'V3',
        batch_size=batch_size,
        shuffle=True,
        fraction_train=fraction_train)
    test_iter = modules.make_iterator_preprocessed('testing',
                                                   'V1',
                                                   'V2',
                                                   'V3',
                                                   batch_size=batch_size,
                                                   shuffle=True)

    RFlocs_V1_overlapped_avg = modules.get_RFs('V1', context)
    RFlocs_V2_overlapped_avg = modules.get_RFs('V2', context)
    RFlocs_V3_overlapped_avg = modules.get_RFs('V3', context)

    with Context(context):
        discriminator = Discriminator(in_chan)
        generator = Generator(in_chan, context)

        if load_model:
            generator.network.load_parameters(
                f'saved_models/{old_runname}/netG_{start_epoch}.model',
                ctx=context)
            discriminator.network.load_parameters(
                f'saved_models/{old_runname}/netD_{start_epoch}.model')

        gen_lossfun = gen.Lossfun(1, 100, 1, context)
        d = discriminator.network

        dis_lossfun = dis.Lossfun(1)
        g = generator.network

        print('train_dataset_length:', len(train_iter._dataset))

        for epoch in range(epochs):

            loss_discriminator_train = []
            loss_generator_train = []

            # ====================
            # T R A I N I N G
            # ====================

            for RFsignalsV1, RFsignalsV2, RFsignalsV3, targets in tqdm(
                    train_iter, total=len(train_iter)):
                # -------
                # Inputs
                # -------
                inputs1 = modules.get_inputsROI(RFsignalsV1,
                                                RFlocs_V1_overlapped_avg,
                                                context)
                inputs2 = modules.get_inputsROI(RFsignalsV2,
                                                RFlocs_V2_overlapped_avg,
                                                context)
                inputs3 = modules.get_inputsROI(RFsignalsV3,
                                                RFlocs_V3_overlapped_avg,
                                                context)
                inputs = concat(inputs1, inputs2, inputs3, dim=1)
                # ------------------------------------
                # T R A I N  D i s c r i m i n a t o r
                # ------------------------------------
                targets = targets.as_in_context(context).transpose(
                    (0, 1, 3, 2))

                loss_discriminator_train.append(
                    discriminator.train(g, inputs, targets))

                # ----------------------------
                # T R A I N  G e n e r a t o r
                # ----------------------------
                loss_generator_train.append(generator.train(
                    d, inputs, targets))

            if load_model:
                os.makedirs('saved_models/' + old_runname, exist_ok=True)
                generator.network.save_parameters(
                    f'saved_models/{old_runname}/netG_{epoch+start_epoch+1}.model'
                )
                discriminator.network.save_parameters(
                    f'saved_models/{old_runname}/netD_{epoch+start_epoch+1}.model'
                )
            else:
                os.makedirs('saved_models/' + runname, exist_ok=True)
                generator.network.save_parameters(
                    f'saved_models/{runname}/netG_{epoch}.model')
                discriminator.network.save_parameters(
                    f'saved_models/{runname}/netD_{epoch}.model')

            # ====================
            # T E S T I N G
            # ====================
            loss_discriminator_test = []
            loss_generator_test = []

            for RFsignalsV1, RFsignalsV2, RFsignalsV3, targets in test_iter:
                # -------
                # Inputs
                # -------
                inputs1 = modules.get_inputsROI(RFsignalsV1,
                                                RFlocs_V1_overlapped_avg,
                                                context)
                inputs2 = modules.get_inputsROI(RFsignalsV2,
                                                RFlocs_V2_overlapped_avg,
                                                context)
                inputs3 = modules.get_inputsROI(RFsignalsV3,
                                                RFlocs_V3_overlapped_avg,
                                                context)
                inputs = concat(inputs1, inputs2, inputs3, dim=1)

                # -----
                # Targets
                # -----
                targets = targets.as_in_context(context).transpose(
                    (0, 1, 3, 2))

                # ----
                # generator prediction for the discriminator input
                # ----

                z = concat(inputs, g(inputs), dim=1)

                dis_loss_test = 0.5 * (dis_lossfun(0, d(z)) + dis_lossfun(
                    1, d(concat(inputs, targets, dim=1))))

                loss_discriminator_test.append(float(dis_loss_test.asscalar()))

                y_hat = generator.network(inputs)
                gen_loss_test = gen_lossfun(
                    1, d(concat(inputs, y_hat, dim=1)), targets, y_hat)

                loss_generator_test.append(float(gen_loss_test.asscalar()))

            summaryWriter.add_image(
                "input", modules.leclip(inputs.expand_dims(2).sum(1)), epoch)
            summaryWriter.add_image("target", modules.leclip(targets), epoch)
            summaryWriter.add_image("pred", modules.leclip(g(inputs)), epoch)
            summaryWriter.add_scalar(
                "dis/loss_discriminator_train",
                sum(loss_discriminator_train) / len(loss_discriminator_train),
                epoch)
            summaryWriter.add_scalar(
                "gen/loss_generator_train",
                sum(loss_generator_train) / len(loss_generator_train), epoch)

            summaryWriter.add_scalar(
                "dis/loss_discriminator_test",
                sum(loss_discriminator_test) / len(loss_discriminator_test),
                epoch)
            summaryWriter.add_scalar(
                "gen/loss_generator_test",
                sum(loss_generator_test) / len(loss_generator_test), epoch)

            # ------------------------------------------------------------------
            # T R A I N I N G Losses
            # ------------------------------------------------------------------
            os.makedirs('saved_models/' + runname, exist_ok=True)
            np.save(f'saved_models/{runname}/Gloss_train',
                    np.array(loss_generator_train))
            np.save(f'saved_models/{runname}/Dloss_train',
                    np.array(loss_discriminator_train))
            # ------------------------------------------------------------------
            # T E S T I N G Losses
            # ------------------------------------------------------------------
            np.save(f'saved_models/{runname}/Gloss_test',
                    np.array(loss_generator_test))
            np.save(f'saved_models/{runname}/Dloss_test',
                    np.array(loss_discriminator_test))
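A hedged usage sketch; the fraction and epoch values are illustrative:

# train on half of the training split; runname becomes 'splitted_data_0.5'
run_experiment(0.5)

# resume that run from epoch 20; assumes the corresponding checkpoints
# exist under saved_models/splitted_data_0.5/
run_experiment(0.5,
               load_model=True,
               old_runname='splitted_data_0.5',
               start_epoch=20)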
Example #4
in_chan = 15

# `device`, `runname`, and `batch_size` below are assumed to be defined
# elsewhere in the module.
# -----------------------------------------------------------------------------------------------------------

if __name__ == "__main__":
    # -------------------------------
    # Context as needed to run on GPU
    # -------------------------------
    context = cpu() if device.value == -1 else gpu(device.value)

    # ----------------------------------------------------
    # SummaryWriter is for visualizing logs in tensorboard
    # ----------------------------------------------------
    summaryWriter = SummaryWriter('../../logs/' + runname, flush_secs=5)

    with Context(context):
        # ----------------------------------------------------
        # RF centers: overlapping
        # ----------------------------------------------------
        RFlocs_V1_overlapped_avg = modules.get_RFs('V1', context)
        RFlocs_V2_overlapped_avg = modules.get_RFs('V2', context)
        RFlocs_V3_overlapped_avg = modules.get_RFs('V3', context)

        test_iter = modules.make_iterator_preprocessed('testing',
                                                       'V1',
                                                       'V2',
                                                       'V3',
                                                       batch_size=batch_size,
                                                       shuffle=True)

        RF_signals_lengths = []
def get_manifold(X):
	"""Train a stacked autoencoder on the DataFrame X and return its
	low-dimensional manifold representation.

	Relies on module-level `epochs` (indexable per layer) and `batch_size`.
	"""
	from mxnet import nd, Context
	from mxnet import ndarray as F
	from mxnet.gluon import Block, nn
	from mxnet.initializer import Uniform

	class Model(Block):
		def __init__(self, num_dim, **kwargs):
			super(Model, self).__init__(**kwargs)
			wi1 = Uniform(0.25)
			wi2 = Uniform(0.1)
			with self.name_scope():
				self.encoder1 = nn.Dense(num_dim//4, in_units=num_dim, weight_initializer=wi1)
				self.encoder2 = nn.Dense(num_dim//16, in_units=num_dim//4, weight_initializer=wi1)
				self.encoder3 = nn.Dense(num_dim//64, in_units=num_dim//16, weight_initializer=wi2)
				self.encoder4 = nn.Dense(num_dim//256, in_units=num_dim//64, weight_initializer=wi2)
				self.decoder4 = nn.Dense(num_dim//64, in_units=num_dim//256, weight_initializer=wi2)
				self.decoder3 = nn.Dense(num_dim//16, in_units=num_dim//64, weight_initializer=wi2)
				self.decoder2 = nn.Dense(num_dim//4, in_units=num_dim//16, weight_initializer=wi1)
				self.decoder1 = nn.Dense(num_dim, in_units=num_dim//4, weight_initializer=wi1)
			self.layers = [(self.encoder1,self.decoder1),
						(self.encoder2,self.decoder2),
						(self.encoder3,self.decoder3),
						(self.encoder4,self.decoder4)]

			for layer in self.layers:
				self.register_child(layer[0])
				self.register_child(layer[1])
				
		def onelayer(self, x, layer):
			# encode through one layer with tanh, then decode it directly
			xx = F.tanh(layer[0](x))
			return layer[1](xx)
		
		def oneforward(self, x, layer):
			return F.tanh(layer[0](x))
		
		def forward(self, x):
			n_layer = len(self.layers)
			for i in range(n_layer):
				x = F.tanh(self.layers[i][0](x))
			for i in range(n_layer-1):
				x = F.tanh(self.layers[n_layer-i-1][1](x))
			return self.layers[0][1](x)
		
		def manifold(self, x):
			n_layer = len(self.layers)
			for i in range(n_layer-1):
				x = F.tanh(self.layers[i][0](x))
			return self.layers[n_layer-1][0](x)

	from mxnet import autograd
	from mxnet import gpu, cpu
	from mxnet.gluon import Trainer
	from mxnet.gluon.loss import L2Loss

	# Stacked autoencoder, trained on a single GPU
	with Context(gpu(0)) as ctx:
		model = Model(X.shape[1])
		model.initialize(ctx=ctx)

		# select training algorithm
		trainer = Trainer(model.collect_params(), 'adam')
		loss_func = L2Loss()

		# Start Pretraining
		print('start pretraining of StackedAE...')
		loss_n = [] # for log

		buffer = nd.array(X.values)
		for layer_id, layer in enumerate(model.layers):
			print('layer %d of %d...'%(layer_id+1,len(model.layers)))
			trainer.set_learning_rate(0.02)
			for epoch in range(1, epochs[layer_id] + 1):
				# random permutation of sample indices
				indexs = np.random.permutation(buffer.shape[0])
				for bs in range(0,buffer.shape[0],batch_size):
					be = min(buffer.shape[0],bs+batch_size)
					data = buffer[indexs[bs:be]]
					# forward
					with autograd.record():
						output = model.onelayer(data, layer)
						# make loss
						loss = loss_func(output, data)
						# for log
						loss_n.append(np.mean(loss.asnumpy()))
						del output
					# backward
					loss.backward()
					# update parameters for this batch
					trainer.step(batch_size, ignore_stale_grad=True)
					del data, loss
				# show log
				print('%d/%d epoch loss=%f...'%(epoch,epochs[layer_id],np.mean(loss_n)))
				loss_n = []
				del bs, be, indexs
			buffer = model.oneforward(buffer, layer)
		del layer, loss_n, buffer

		print('start training of StackedAE...')
		loss_n = []
		buffer = nd.array(X.values)
		trainer.set_learning_rate(0.02)
		for epoch in range(1, epochs[-1] + 1):
			# random permutation of sample indices
			indexs = np.random.permutation(buffer.shape[0])
			for bs in range(0,buffer.shape[0],batch_size):
				be = min(buffer.shape[0],bs+batch_size)
				data = buffer[indexs[bs:be]]
				# forward
				with autograd.record():
					output = model(data)
					# make loss
					loss = loss_func(output, data)
					# for log
					loss_n.append(np.mean(loss.asnumpy()))
					del output
				# backward
				loss.backward()
				# update parameters for this batch
				trainer.step(batch_size, ignore_stale_grad=True)
				del data, loss
			# show log
			print('%d/%d epoch loss=%f...'%(epoch,epochs[-1],np.mean(loss_n)))
			loss_n = []
			del bs, be, indexs
		del trainer, loss_func, loss_n, buffer

		print('making manifold...')
		manifold_X = pd.DataFrame()
		for bs in range(0,X.shape[0],batch_size):
			be = min(X.shape[0],bs + batch_size)
			nx = nd.array(X.iloc[bs:be].values)
			df = pd.DataFrame(model.manifold(nx).asnumpy())
			manifold_X = pd.concat([manifold_X, df], ignore_index=True, sort=False)
			del be, df, nx
		del model, bs
		return manifold_X
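A hedged usage sketch for `get_manifold`; the module-level `epochs` and `batch_size` it relies on are given illustrative values here, and a CUDA device is assumed since training is pinned to `gpu(0)`:

import numpy as np
import pandas as pd

# illustrative hyperparameters consumed inside get_manifold:
# four per-layer pretraining epoch counts plus a final fine-tuning count
epochs = [10, 10, 10, 10, 20]
batch_size = 256

# random 1024-dimensional data; the bottleneck is 1024 // 256 = 4 units,
# so the returned manifold has 4 columns
X = pd.DataFrame(np.random.rand(5000, 1024).astype('float32'))
manifold = get_manifold(X)
print(manifold.shape)  # (5000, 4)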