Example n. 1
def __getUserCallBack(loginName, password, clientData, httpcode, data, headers, success, url):
	"""
	参数1:HTTP请求返回码 这个结果集合参数是一个整形值
	参数2:返回的内容 它是一个字符串
	参数3:服务器返回的HTTP协议头,如:{"Content-Type": "application/x-www-form-urlencoded"} 它是一个字典
	参数4:执行是否成功,当请求执行有错误时为False 可以通过httpcode进一步判断错误信息
	参数5:请求所用的网址
	"""
	INFO_MSG('__getUserCallBack: loginName=%s, httpcode=%s, success=%s' % (loginName, httpcode, success))

	# If fetching the WeChat user info failed
	if not success:
		KBEngine.accountLoginResponse(loginName, loginName, clientData, KBEngine.SERVER_ERR_USER1)
		return

	# Parse the JSON response
	data = GTools.json_load(data)

	# WeChat returned an error code (failed to fetch the user info)
	if 'errcode' in data and data['errcode'] != 0:
		ERROR_MSG('__getUserCallBack: data=%s' % (data))
		KBEngine.accountLoginResponse(loginName, loginName, clientData, KBEngine.SERVER_ERR_USER1)
		return

	# Verify the signed data: password must equal sha1(clientData + session_key)
	
	key = GTools.getSha1Str(GTools.byteToStr(clientData) + data['session_key'])
	ERROR_MSG("clientData(%s) session_key(%s) key(%s) password(%s)" % (clientData, data['session_key'], key, password))
	
	if key == password:
		INFO_MSG("__getUserCallBack::SERVER_SUCCESS. loginName=%s, openid=%s" % (loginName, data['openid']))
		KBEngine.accountLoginResponse(loginName, data['openid'], clientData, KBEngine.SERVER_SUCCESS)
	else:
		ERROR_MSG("__getUserCallBack::SERVER_ERR_NAME_PASSWORD. loginName=%s, openid=%s" % (loginName, data['openid']))
		KBEngine.accountLoginResponse(loginName, loginName, clientData, KBEngine.SERVER_ERR_NAME_PASSWORD)
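
A note on the key check in __getUserCallBack above: the client-supplied password is compared against sha1(clientData + session_key). A minimal sketch of that computation with hashlib, assuming GTools.getSha1Str returns a lowercase hex digest and GTools.byteToStr is a plain UTF-8 decode (both are assumptions about those helpers):

import hashlib

def expected_login_key(client_datas: bytes, session_key: str) -> str:
    # Mirrors the server-side check: sha1(clientData + session_key), hex-encoded.
    payload = client_datas.decode("utf-8") + session_key
    return hashlib.sha1(payload.encode("utf-8")).hexdigest()

# The client sends this value as `password`; __getUserCallBack accepts the
# login only when it matches the key recomputed on the server.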
Example n. 2
    def onClientEnabled(self):
        """
        KBEngine method.
        该entity被正式激活为可使用, 此时entity已经建立了client对应实体, 可以在此创建它的
        cell部分。
        """
        DEBUG_MSG("Avatar(%i_%i):onClientEnabled." % (self.id, self.avatarID))

        # Update the last login time
        self.lastReloginTime = GTools.nowTime()

        self._updateAvatar()

        # Abort if the main hall cannot be obtained
        mainHalls = GTools.getBaseData_MainHalls()
        if mainHalls is None:
            ERROR_MSG("Avatar(%i_%i):onClientEnabled. mainHalls is None." %
                      (self.id, self.avatarID))
            return

        # Notify the main hall that we are online
        mainHalls.B_tellOnline(self, self.avatarID)

        # No current game hall: join the game hall for this server
        if self.nowHalls is None:
            hallsName = GServerCfg.GC_ServerIDToHalls[self.serverID]
            mainHalls.B_reqEnterGameHalls(self, self.avatarID, hallsName)

        # Already in a game hall: tell the client which hall we are in
        elif self.client is not None:
            self.client.Exs_tellCurHalls(self.nowHallsName)

        # Notify the cell of the online status
        if self.cell is not None:
            self.cell.C_tellOnlineStatus(True)
Example n. 3
    def __init__(self):
        KBEngine.Entity.__init__(self)

        # Info about halls that have registered themselves
        self.hallsDict = {}

        # Dict of online avatars
        self.avatarsDict = {}

        # Register ourselves as a base-level global
        GTools.setBaseData_MainHalls(self)
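
GTools.setBaseData_MainHalls / getBaseData_MainHalls (used here and in Example n. 2) act as a small base-level global registry for the MainHalls entity. A minimal sketch of such helpers, assuming they are thin wrappers around KBEngine.globalData; the real GTools implementation may store the reference differently:

import KBEngine

def setBaseData_MainHalls(entity):
    # Publish the MainHalls base entity so other base entities can reach it.
    KBEngine.globalData["MainHalls"] = entity

def getBaseData_MainHalls():
    # Returns None when the MainHalls entity has not registered itself yet.
    try:
        return KBEngine.globalData["MainHalls"]
    except KeyError:
        return None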
Example n. 4
    def _onAvatarSaved(self, bSuccess, newAV):
        """
        角色实体存库后的回调
        参数1:成功或失败
        参数2:base实体
        """
        # If the account was destroyed while the avatar was being created
        if self.isDestroyed and newAV:
            newAV.destroy(True)
            return

        # props stays None when the save failed, so the notification below
        # does not raise a NameError
        props = None

        if bSuccess:
            # Save the entity's unique ID (its databaseID)
            newAV.avatarID = newAV.databaseID

            # Record it in the account's avatar dict and persist to the DB
            props = GTools.dcopy(GDataTmp.Avatar_Info_In_Account)
            props['avatarID'] = newAV.avatarID
            props['serverID'] = newAV.serverID
            self.avatarDict[newAV.serverID] = props
            self.writeToDB()

            # Destroy the temporary base entity
            newAV.destroy()

        # Notify the client of the creation result
        if self.client is not None:
            self.client.Exs_ackCreateAvatar(bSuccess, props)
Example n. 5
    def __init__(self):
        KBEngine.Proxy.__init__(self)

        # Currently selected avatar
        self.curAvatar = None

        # Client data provided at login (JSON)
        self.clientData = GTools.json_load(self.getClientDatas()[0])
Example n. 6
    def onClientEnabled(self):
        """
        KBEngine method.
        该entity被正式激活为可使用, 此时entity已经建立了client对应实体, 可以在此创建它的
        cell部分。
        """
        INFO_MSG("Account(%i) entities enable." % (self.id))

        # Update the last login time
        self.lastReloginTime = GTools.nowTime()
Example n. 7
    def resetTimer(self, initialOffset):
        """
        重置游戏定时器
        参数1:时间间隔
        """
        # If a timer is already running, close it first
        if self.timerID != 0:
            self.closeTimer()

        # Start the timer and record the start time
        self.timerID = self.addTimer(initialOffset, 0, 0)
        self.startTime = GTools.nowTime()
Example n. 8
    def onClientDeath(self):
        """
        KBEngine method.
        客户端对应实体已经销毁
        """
        DEBUG_MSG("Avatar(%i_%i):onClientDeath." % (self.id, self.avatarID))

        # Notify the main hall that we are offline
        mainHalls = GTools.getBaseData_MainHalls()
        if mainHalls:
            mainHalls.B_tellOffline(self, self.avatarID)

        # Notify the cell of the offline status
        if self.cell is not None:
            self.cell.C_tellOnlineStatus(False)
Example n. 9
    def onTimer(self, tid, userArg):
        """
        KBEngine method.
        使用addTimer后, 当时间到达则该接口被调用
        @param tid		: addTimer 的返回值ID
        @param userArg	: addTimer 最后一个参数所给入的数据
        """
        DEBUG_MSG("%s(%i):onTimer. tid(%s) userArg(%s)." %
                  (self.hallsName, self.id, tid, userArg))

        # Report our info to the main hall, then stop this timer
        if userArg == GDefine.GC_HallsTime['regToMainHalls']['userArg']:
            mainHalls = GTools.getBaseData_MainHalls()
            if mainHalls:
                mainHalls.B_tellHallsInfo(self, self.hallsName)
                self.delTimer(tid)
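
Examples n. 7 and n. 9 use the KBEngine entity timer API: addTimer(initialOffset, repeatOffset, userArg) returns a timer ID, onTimer receives that ID together with userArg, and delTimer(tid) cancels the timer. A minimal sketch of how the 'regToMainHalls' timer handled above might be registered; the class name and the 1-second interval are illustrative assumptions, only the userArg lookup comes from the original code:

import KBEngine

class GameHalls(KBEngine.Entity):  # illustrative entity, not from the original sources
    def __init__(self):
        KBEngine.Entity.__init__(self)
        # GDefine comes from the project's own script assets
        userArg = GDefine.GC_HallsTime['regToMainHalls']['userArg']
        # Fires after 1 second and then every second until onTimer (Example n. 9)
        # reports the hall info and calls self.delTimer(tid).
        self.addTimer(1.0, 1.0, userArg)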
Example n. 10
def onRequestAccountLogin(loginName, password, datas):
	"""
	KBEngine method.
	请求登陆账号回调
	@param loginName: 客户端请求时所提交的名称 - 微信登陆获得的code
	@type  loginName: string
	
	@param password: 密码 - sha1( datas + sessionkey )
	@type  password: string
	
	@param datas: 客户端请求时所附带的数据,可将数据转发第三方平台 - 用户信息字符串
	@type  datas: bytes
	"""
	INFO_MSG('onRequestAccountLogin: loginName=%s' % (loginName))
	
	# The request can be forwarded to a third-party platform here (e.g. over HTTP),
	# and data returned by the platform may also be placed into datas.
	# datas will be passed back to the client.
	# If HTTP is used, note that interfaces is single-threaded, so a synchronous HTTP
	# call can easily block the main thread. Prefer KBEngine.registerReadFileDescriptor()
	# and KBEngine.registerWriteFileDescriptor(), or asynchronous access via
	# KBEngine.urlopen("https://www.baidu.com", onHttpCallback). Interacting with the
	# platform over a raw socket is also possible.

	# Returning KBEngine.SERVER_ERR_LOCAL_PROCESSING means the login is accepted here,
	# but dbmgr still needs to check the account name and password.
	# KBEngine.SERVER_SUCCESS means no further password check is required.

	loginNameList = loginName.split(':')

	# '0': plain account login
	if loginNameList[0] == '0':
		KBEngine.accountLoginResponse(loginName, loginName, datas, KBEngine.SERVER_ERR_LOCAL_PROCESSING)

	# '1': WeChat login
	elif loginNameList[0] == '1':
		# Exchange the code for the user's unique OpenID and the session_key
		url = "https://api.weixin.qq.com/sns/jscode2session?appid=%s&secret=%s&js_code=%s&grant_type=authorization_code" \
				% (GServerCfg.GC_APPInfo['appID'], GServerCfg.GC_APPInfo['AppSecret'], loginNameList[1])
		
		KBEngine.urlopen(url, GTools.functor(__getUserCallBack, loginName, password, datas))
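
GTools.functor above pre-binds loginName, password and datas, so that KBEngine.urlopen only has to supply the HTTP result arguments (httpcode, data, headers, success, url, as documented in Example n. 1). A minimal sketch, assuming functor behaves like functools.partial:

import functools

def functor(func, *bound_args):
    # Pre-binds the leading arguments; the HTTP result arguments supplied later
    # by KBEngine.urlopen are appended after them when the callback fires.
    return functools.partial(func, *bound_args)

# Usage as in onRequestAccountLogin:
#   cb = functor(__getUserCallBack, loginName, password, datas)
#   KBEngine.urlopen(url, cb)
# urlopen then invokes cb(httpcode, data, headers, success, url).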
Example n. 11
def setup_inputs(sess, filenames, image_size=None, capacity_factor=3):
    """Build a TF1 queue-based input pipeline.

    Reads TFRecord files from `filenames`, decodes and transforms the examples
    according to myParams.myDict['InputMode'], and returns batched
    (features, labels) tensors with queue runners started on `sess`.
    """
    if image_size is None:
        image_size = FLAGS.sample_size

    # pdb.set_trace()

    reader = tf.TFRecordReader()

    filename_queue = tf.train.string_input_producer(filenames)
    key, value = reader.read(filename_queue)
    

    # Modes whose TFRecords carry only 'data_raw'; their labels are derived
    # from the data itself rather than read from a 'labels_raw' field.
    AlsoLabel = True
    kKick = myParams.myDict['InputMode'] == 'kKick'
    if kKick or myParams.myDict['InputMode'] in ('1DFTx', '1DFTy', '2DFT'):
        AlsoLabel = False

    batch_size=myParams.myDict['batch_size']

    channelsIn=myParams.myDict['channelsIn']
    channelsOut=myParams.myDict['channelsOut']
    DataH=myParams.myDict['DataH']
    DataW=myParams.myDict['DataW']
    LabelsH=myParams.myDict['LabelsH']
    LabelsW=myParams.myDict['LabelsW']
    
    if myParams.myDict['InputMode'] == 'AAA':

        
        #filename_queue = tf.Print(filename_queue,[filename_queue,],message='ZZZZZZZZZ:')
        keyX=key
        value = tf.Print(value,[keyX,],message='QQQ:')

        featuresA = tf.parse_single_example(
            value,
            features={
                'CurIs': tf.FixedLenFeature([], tf.string),
                'Labels': tf.FixedLenFeature([], tf.string)
            })
        feature = tf.decode_raw(featuresA['Labels'], tf.float32)
        CurIs = tf.decode_raw(featuresA['CurIs'], tf.float32)
        CurIs = tf.cast(CurIs, tf.int64)

        mx = CurIs
        # Debug trace of the record key and CurIs
        feature = tf.Print(feature, [keyX, mx], message='QQQ:')

        feature = tf.reshape(feature, [DataH, DataW, channelsIn])
        feature = tf.cast(feature, tf.float32)

        label=feature

        features, labels = tf.train.batch([feature, label],
                                          batch_size=batch_size,
                                          num_threads=4,
                                          capacity = capacity_factor*batch_size,
                                          name='labels_and_features')

        tf.train.start_queue_runners(sess=sess)
        
        return features, labels

        
    #image = tf.image.decode_jpeg(value, channels=channels, name="dataset_image")

    #print('1')
    if AlsoLabel:
        featuresA = tf.parse_single_example(
            value,
            features={
                'DataH': tf.FixedLenFeature([], tf.int64),
                'DataW': tf.FixedLenFeature([], tf.int64),
                'channelsIn': tf.FixedLenFeature([], tf.int64),
                'LabelsH': tf.FixedLenFeature([], tf.int64),
                'LabelsW': tf.FixedLenFeature([], tf.int64),
                'channelsOut': tf.FixedLenFeature([], tf.int64),
                'data_raw': tf.FixedLenFeature([], tf.string),
                'labels_raw': tf.FixedLenFeature([], tf.string)
            })
        labels = tf.decode_raw(featuresA['labels_raw'], tf.float32)
    else:
        featuresA = tf.parse_single_example(
            value,
            features={
                'DataH': tf.FixedLenFeature([], tf.int64),
                'DataW': tf.FixedLenFeature([], tf.int64),
                'channelsIn': tf.FixedLenFeature([], tf.int64),
                'data_raw': tf.FixedLenFeature([], tf.string)
            })
    feature = tf.decode_raw(featuresA['data_raw'], tf.float32)

    print('setup_inputs')
    print('Data   H,W,#ch: %d,%d,%d -> Labels H,W,#ch %d,%d,%d' % (DataH,DataW,channelsIn,LabelsH,LabelsW,channelsOut))
    print('------------------')
    
    if myParams.myDict['InputMode'] == '1DFTy':
        feature = tf.reshape(feature, [256, 256, 1])
        feature = tf.random_crop(feature, [DataH, DataW, channelsIn])
        
        mm=tf.reduce_mean(feature)
        mx=tf.reduce_max(feature)
        mx=tf.maximum(mx,1)

        #feature = tf.Print(feature,[mm,mx],message='QQQ:')        
        #assert_op = tf.Assert(tf.greater(mx, 0), [mx])
        #with tf.control_dependencies([assert_op]):

        feature = tf.cast(feature/mx, tf.complex64)
        
        Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
        IQ=feature*tf.reshape(Q,[DataH,DataW,channelsIn])
        
        label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
        feature=label

        # Index vector that swaps the two halves of the W axis
        # (an fftshift along that axis); integer division keeps the indices integral
        HalfDataW = DataW // 2

        Id = np.hstack([np.arange(HalfDataW, DataW), np.arange(0, HalfDataW)])
        Id = Id.astype(int)

        IQ2=tf.reshape(IQ,IQ.shape[0:2])
        feature=tf.fft(IQ2)
        feature = tf.gather(feature,Id,axis=1)
        feature = tf.reshape(feature, [DataH, DataW, channelsIn])
        feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)

        features, labels = tf.train.batch([feature, label],
                                          batch_size=batch_size,
                                          num_threads=4,
                                          capacity = capacity_factor*batch_size,
                                          name='labels_and_features')

        tf.train.start_queue_runners(sess=sess)
        
        return features, labels

    if myParams.myDict['InputMode'] == '1DFTx':
        feature = tf.reshape(feature, [256, 256, 1])
        feature = tf.random_crop(feature, [DataH, DataW, channelsIn])
        
        mm=tf.reduce_mean(feature)
        mx=tf.reduce_max(feature)
        mx=tf.maximum(mx,1)

        #feature = tf.Print(feature,[mm,mx],message='QQQ:')        
        #assert_op = tf.Assert(tf.greater(mx, 0), [mx])
        #with tf.control_dependencies([assert_op]):

        feature = tf.cast(feature/mx, tf.complex64)
        
        Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
        IQ=feature*tf.reshape(Q,[DataH,DataW,channelsIn])
        
        label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
        feature=label

        # Index vector that swaps the two halves of the H axis
        # (an fftshift along that axis); integer division keeps the indices integral
        HalfDataH = DataH // 2

        Id = np.hstack([np.arange(HalfDataH, DataH), np.arange(0, HalfDataH)])
        Id = Id.astype(int)

        IQ2=tf.reshape(IQ,IQ.shape[0:2])
        IQ2 = tf.transpose(IQ2, perm=[1, 0])
        feature=tf.fft(IQ2)
        feature = tf.gather(feature,Id,axis=1)
        feature = tf.transpose(feature, perm=[1,0])
        feature = tf.reshape(feature, [DataH, DataW, channelsIn])
        feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)

        features, labels = tf.train.batch([feature, label],
                                          batch_size=batch_size,
                                          num_threads=4,
                                          capacity = capacity_factor*batch_size,
                                          name='labels_and_features')

        tf.train.start_queue_runners(sess=sess)
        
        return features, labels

    if myParams.myDict['InputMode'] == '2DFT':
        feature = tf.reshape(feature, [256, 256, 1])
        feature = tf.random_crop(feature, [DataH, DataW, channelsIn])
        
        mm=tf.reduce_mean(feature)
        mx=tf.reduce_max(feature)
        mx=tf.maximum(mx,1)

        #feature = tf.Print(feature,[mm,mx],message='QQQ:')        
        #assert_op = tf.Assert(tf.greater(mx, 0), [mx])
        #with tf.control_dependencies([assert_op]):

        feature = tf.cast(feature/mx, tf.complex64)
        
        Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
        IQ=feature*tf.reshape(Q,[DataH,DataW,channelsIn])
        
        label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
        feature=label

        # fftshift-style index vectors for both axes
        HalfDataH = DataH // 2
        HalfDataW = DataW // 2

        IdH = np.hstack([np.arange(HalfDataH, DataH), np.arange(0, HalfDataH)])
        IdH = IdH.astype(int)

        IdW = np.hstack([np.arange(HalfDataW, DataW), np.arange(0, HalfDataW)])
        IdW = IdW.astype(int)

        IQ2=tf.reshape(IQ,IQ.shape[0:2])

        IQ2=tf.fft(IQ2)
        IQ2=tf.gather(IQ2,IdW,axis=1)

        IQ2 = tf.transpose(IQ2, perm=[1, 0])
        feature=tf.fft(IQ2)
        feature = tf.gather(feature,IdH,axis=1)
        feature = tf.transpose(feature, perm=[1,0])
        feature = tf.reshape(feature, [DataH, DataW, channelsIn])
        feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)

        features, labels = tf.train.batch([feature, label],
                                          batch_size=batch_size,
                                          num_threads=4,
                                          capacity = capacity_factor*batch_size,
                                          name='labels_and_features')

        tf.train.start_queue_runners(sess=sess)
        
        return features, labels

    if kKick:
        filename_queue2 = tf.train.string_input_producer(filenames)
        key2, value2 = reader.read(filename_queue2)
        featuresA2 = tf.parse_single_example(
            value2,
            features={
                'DataH': tf.FixedLenFeature([], tf.int64),
                'DataW': tf.FixedLenFeature([], tf.int64),
                'channelsIn': tf.FixedLenFeature([], tf.int64),
                'data_raw': tf.FixedLenFeature([], tf.string)
            })
        feature2 = tf.decode_raw(featuresA2['data_raw'], tf.float32)

        feature = tf.reshape(feature, [DataH, DataW, channelsIn])
        feature2 = tf.reshape(feature2, [DataH, DataW, channelsIn])


        feature.set_shape([None, None, channelsIn])
        feature2.set_shape([None, None, channelsIn])

        feature = tf.cast(feature, tf.float32)/tf.reduce_max(feature)
        feature2 = tf.cast(feature2, tf.float32)/tf.reduce_max(feature)
        
        feature= tf.concat([feature,feature*0,feature2,feature2*0], 2)
        label=feature

        features, labels = tf.train.batch([feature, label],
                                          batch_size=batch_size,
                                          num_threads=4,
                                          capacity = capacity_factor*batch_size,
                                          name='labels_and_features')

        tf.train.start_queue_runners(sess=sess)
        
        return features, labels

    if myParams.myDict['InputMode'] == 'RegridTry1':
        # FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
        FullData=scipy.io.loadmat(myParams.myDict['NMAP_FN'])
        
        NMapCR=FullData['NMapCR']
        NMapCR = tf.constant(NMapCR)

        feature=tf.gather(feature,NMapCR,validate_indices=None,name=None)

        feature = tf.reshape(feature, [DataH, DataW, channelsIn])
        feature = tf.cast(feature, tf.float32)
        
        labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
        label = tf.cast(labels, tf.float32)

        # Using asynchronous queues
        features, labels = tf.train.batch([feature, label],
                                          batch_size=batch_size,
                                          num_threads=4,
                                          capacity = capacity_factor*batch_size,
                                          name='labels_and_features')

        tf.train.start_queue_runners(sess=sess)
        
        return features, labels

    if myParams.myDict['InputMode'] == 'SMASHTry1':
        feature = tf.reshape(feature, [DataH, DataW, channelsIn])
        feature = tf.cast(feature, tf.float32)
        
        labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
        label = tf.cast(labels, tf.float32)

        # Using asynchronous queues
        features, labels = tf.train.batch([feature, label],
                                          batch_size=batch_size,
                                          num_threads=4,
                                          capacity = capacity_factor*batch_size,
                                          name='labels_and_features')

        tf.train.start_queue_runners(sess=sess)
        
        return features, labels

    """if myParams.myDict['Mode'] == 'RegridTry1C2':
        FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndC.mat')
        NMapCR=FullData['NMapCRC']
        NMapCR = tf.constant(NMapCR)

        feature=tf.gather(feature,NMapCR,validate_indices=None,name=None)

        feature = tf.reshape(feature, [DataH, DataW, channelsIn,2])
        feature = tf.cast(feature, tf.float32)
        
        labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
        label = tf.cast(labels, tf.float32)

        # Using asynchronous queues
        features, labels = tf.train.batch([feature, label],
                                          batch_size=batch_size,
                                          num_threads=4,
                                          capacity = capacity_factor*batch_size,
                                          name='labels_and_features')

        tf.train.start_queue_runners(sess=sess)
        
        return features, labels"""



    feature = tf.reshape(feature, [DataH, DataW, channelsIn])
    labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
    
    #print('44')
    #example.ParseFromString(serialized_example)
    #x_1 = np.array(example.features.feature['X'].float_list.value)

    # Convert from [depth, height, width] to [height, width, depth].
    #result.uint8image = tf.transpose(depth_major, [1, 2, 0])

    feature.set_shape([None, None, channelsIn])
    labels.set_shape([None, None, channelsOut])

    

    # Crop and other random augmentations
    #image = tf.image.random_flip_left_right(image)
    #image = tf.image.random_saturation(image, .95, 1.05)
    #image = tf.image.random_brightness(image, .05)
    #image = tf.image.random_contrast(image, .95, 1.05)

    #print('55')
    #wiggle = 8
    #off_x, off_y = 25-wiggle, 60-wiggle
    #crop_size = 128
    #crop_size_plus = crop_size + 2*wiggle
    #print('56')
    #image = tf.image.crop_to_bounding_box(image, off_y, off_x, crop_size_plus, crop_size_plus)
    #print('57')
    #image = tf.image.crop_to_bounding_box(image, 1, 2, crop_size, crop_size)
    #image = tf.random_crop(image, [crop_size, crop_size, 3])

    feature = tf.reshape(feature, [DataH, DataW, channelsIn])
    feature = tf.cast(feature, tf.float32) #/255.0

    
    labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
    label = tf.cast(labels, tf.float32) #/255.0


    #if crop_size != image_size:
    #    image = tf.image.resize_area(image, [image_size, image_size])

    # The feature is simply a Kx downscaled version
    #K = 1
    #downsampled = tf.image.resize_area(image, [image_size//K, image_size//K])

    #feature = tf.reshape(downsampled, [image_size//K, image_size//K, 3])
    #feature = tf.reshape(downsampled, [image_size//K, image_size//K, 3])
    #label   = tf.reshape(image,       [image_size,   image_size,     3])

    #feature = tf.reshape(image,     [image_size,    image_size,     channelsIn])
    #feature = tf.reshape(image,     [1, image_size*image_size*2,     channelsIn])
    #label   = tf.reshape(labels,    [image_size,    image_size,     channelsOut])

    # Using asynchronous queues
    features, labels = tf.train.batch([feature, label],
                                      batch_size=batch_size,
                                      num_threads=4,
                                      capacity = capacity_factor*batch_size,
                                      name='labels_and_features')

    tf.train.start_queue_runners(sess=sess)
    
    return features, labels
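
In the 1DFTx/1DFTy/2DFT branches of setup_inputs, the Id/IdH/IdW index vectors reorder the output of tf.fft so the zero-frequency component ends up in the centre: gathering with [N/2 .. N-1, 0 .. N/2-1] is an fftshift along that axis. A small NumPy check of that equivalence (even N assumed, matching the 256x256 crops used above):

import numpy as np

N = 256
x = np.fft.fft(np.random.rand(N))

# Index vector built exactly as in setup_inputs
Id = np.hstack([np.arange(N // 2, N), np.arange(0, N // 2)]).astype(int)

# Gathering with Id gives the same result as np.fft.fftshift along that axis
assert np.allclose(x[Id], np.fft.fftshift(x))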
Example n. 12
def train_model(train_data):
    td = train_data

    summaries = tf.summary.merge_all()
    RestoreSession = False
    if not RestoreSession:
        td.sess.run(tf.global_variables_initializer())

    # lrval       = FLAGS.learning_rate_start
    learning_rate_start = myParams.myDict['learning_rate_start']
    lrval = myParams.myDict['learning_rate_start']
    start_time = time.time()
    last_summary_time = time.time()
    last_checkpoint_time = time.time()
    done = False
    batch = 0

    print("lrval %f" % (lrval))

    # assert FLAGS.learning_rate_half_life % 10 == 0

    # Cache test features and labels (they are small)
    test_feature, test_label = td.sess.run([td.test_features, td.test_labels])
    # test_label = td.sess.run([td.test_features, td.test_labels])

    G_LossV = np.zeros((1000000), dtype=np.float32)
    filename = os.path.join(myParams.myDict['train_dir'], 'TrainSummary.mat')

    feed_dictOut = {td.gene_minput: test_feature}
    gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)
    # _summarize_progress(td, test_label, gene_output, batch, 'out')

    feed_dict = {td.learning_rate: lrval}
    opsx = [td.gene_minimize, td.gene_loss]
    _, gene_loss = td.sess.run(opsx, feed_dict=feed_dict)

    # opsy = [td.gene_loss]
    # gene_loss = td.sess.run(opsy, feed_dict=feed_dict)

    # ops = [td.gene_minimize, td.disc_minimize, td.gene_loss, td.disc_real_loss, td.disc_fake_loss]
    # _, _, gene_loss, disc_real_loss, disc_fake_loss = td.sess.run(ops, feed_dict=feed_dict)

    batch += 1

    # run_metadata = tf.RunMetadata()
    gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)
    # gene_output = td.sess.run(td.gene_moutput, options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE, output_partition_graphs=True), feed_dict=feed_dictOut,run_metadata=run_metadata)
    # _summarize_progress(td, test_label, gene_output, batch, 'out')
    # with open("/tmp/run2.txt", "w") as out:
    #     out.write(str(run_metadata))

    # fetched_timeline = timeline.Timeline(run_metadata.step_stats)
    # chrome_trace = fetched_timeline.generate_chrome_trace_format()
    # with open('timeline_01.json', 'w') as f:
    #     f.write(chrome_trace)

    # tl = timeline.Timeline(run_metadata.step_stats)
    # print(tl.generate_chrome_trace_format(show_memory=True))
    # trace_file = tf.gfile.Open(name='timeline', mode='w')
    # trace_file.write(tl.generate_chrome_trace_format(show_memory=True))

    feed_dict = {td.learning_rate: lrval}
    # ops = [td.gene_minimize, td.disc_minimize, td.gene_loss, td.disc_real_loss, td.disc_fake_loss]

    opsx = [td.gene_minimize, td.gene_loss]
    _, gene_loss = td.sess.run(opsx, feed_dict=feed_dict)

    batch += 1

    gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)
    # _summarize_progress(td, test_label, gene_output, batch, 'out')

    # load model
    #saver.restore(sess,tf.train.latest_checkpoint('./'))
    # running model on data:test_feature
    RunOnData = False
    if RunOnData:
        filenames = tf.gfile.ListDirectory('DataAfterpMat')
        filenames = sorted(filenames)
        #filenames = [os.path.join('DataAfterpMat', f) for f in filenames]
        Ni = len(filenames)
        OutBase = myParams.myDict['SessionName'] + '_OutMat'
        tf.gfile.MakeDirs(OutBase)

        #pdb.set_trace()

        for index in range(Ni):
            print(index)
            print(filenames[index])
            CurData = scipy.io.loadmat(
                os.path.join('DataAfterpMat', filenames[index]))
            Data = CurData['CurData']
            Data = Data.reshape((1, 64, 64, 1))
            test_feature = np.kron(np.ones((16, 1, 1, 1)), Data)
            #test_feature = np.array(np.random.choice([0, 1], size=(16,64,64,1)), dtype='float32')

            feed_dictOut = {td.gene_minput: test_feature}
            gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)

            filenameOut = os.path.join(OutBase,
                                       filenames[index][:-4] + '_out.mat')

            SOut = {}
            SOut['X'] = gene_output[0]
            scipy.io.savemat(filenameOut, SOut)

    # pdb.set_trace()

    #_summarize_progress(td, test_feature, test_label, gene_output, batch, 'out')
    # to get value of var:
    # ww=td.sess.run(td.gene_var_list[1])

    if GT.getparam('ShowRealData') > 0:
        # ifilename=os.path.join('RealData', 'b.mat')

        if GT.getparam('InputMode') == 'RegridTry3F_B0T2S_ITS_MB':
            MB = GT.getparam('MB')
            nCh = GT.getparam('nccToUse')
            nTSC = GT.getparam('nTimeSegments')
            batch_size = myParams.myDict['batch_size']

            channelsIn = myParams.myDict['channelsIn']
            channelsOut = myParams.myDict['channelsOut']
            LabelsH = myParams.myDict['LabelsH']
            LabelsW = myParams.myDict['LabelsW']
            H = LabelsH
            W = LabelsW

            TimePoints_ms = GT.getparam('TimePoints_ms')
            SN = GT.getparam('SN')  # H,W,f8
            P = GT.getparam('P')  # H,W,f8
            nTraj = GT.getparam('nTraj')

            cCAIPIVecZ = GT.getparam('cCAIPIVecZ')  # MB,nTraj complex128
            TSBF = GT.getparam('TSBF')  # nTS,nTraj f64

            Rec0FN = '/opt/data/RealDataMB/meas_MID426_gBP_2dSpiral_multiecho_ASL_2mm_iso_run1_FID34170_2Echos/Sli06_Rec0.mat'
            B0TSFFN = '/opt/data/RealDataMB/meas_MID426_gBP_2dSpiral_multiecho_ASL_2mm_iso_run1_FID34170_2Echos/Sli06/B0TS.mat'
            SensDataFN = '/opt/data/RealDataMB/meas_MID426_gBP_2dSpiral_multiecho_ASL_2mm_iso_run1_FID34170_2Echos/Sli06/SensCC1.mat'
            RealDtFN = '/opt/data/RealDataMB/meas_MID426_gBP_2dSpiral_multiecho_ASL_2mm_iso_run1_FID34170_2Echos/Sli06/RealDataForNN.mat'

            Rec0F = scipy.io.loadmat(Rec0FN)
            Rec0 = Rec0F['Rec0']  # (96, 192) complex128
            Rec0MB = np.transpose(np.reshape(Rec0, [H, MB, W]),
                                  (0, 2, 1))  # H,W,MB complex128

            B0TSF = scipy.io.loadmat(B0TSFFN)
            CurB0 = B0TSF['CurB0']  # (96, 96, 2) f8, in Hz
            # B0TSF.keys()

            SensData = scipy.io.loadmat(SensDataFN)
            # SensData.keys()
            SensCCA = SensData['SensCCA']  # (96, 96, 16) complex128
            SensCCB = SensData['SensCCB']  # (96, 96, 16) complex128
            RealSensCCMB = np.stack((SensCCA, SensCCB),
                                    axis=3)  # (96, 96, 16,2) complex128
            RealSensCCMB = RealSensCCMB[:, :, :nCh, :]  # H,W,nCh,MB

            RealData = scipy.io.loadmat(RealDtFN)
            DataCC = RealData['DataCC']  # (10238, 16) (nTraj, ncc) complex64
            DataCC = DataCC[:, :nCh]  # (nTraj, nCh) complex64

            CurB0_Hz = CurB0[:, :, np.newaxis, :]

            RealTSC0 = np.complex64(
                np.exp(1j * 2 * np.pi * CurB0_Hz *
                       GT.NP_addDim(TimePoints_ms) / 1000))  # (96, 96, 7, 2)
            # Add T2* of 20
            T2S_est_ms = 20
            RealTSC0 = RealTSC0 * np.exp(
                -GT.NP_addDim(TimePoints_ms) / T2S_est_ms)

            WarmStart_ITS = Rec0MB[:, :,
                                   np.newaxis, :] * RealTSC0  # H,W,nTS,MB

            TSBFCAIPI = np.transpose(cCAIPIVecZ[:, :, np.newaxis],
                                     (1, 2, 0)) * np.transpose(
                                         TSBF[:, :, np.newaxis],
                                         (1, 0, 2))  # nTraj,nTS,MB c128
            cTSBFCAIPI = np.conj(
                np.transpose(
                    TSBFCAIPI[:, :, :, np.newaxis, np.newaxis],
                    (0, 1, 3, 2, 4)))  # nTraj,nTS,/nCh/,MB,/batch_size/

            DataCCMB = DataCC[:, np.newaxis, :, np.newaxis,
                              np.newaxis]  # nTraj,/nTS/,nCh,/MB/,/batch_size/
            DataCCMB = DataCCMB * cTSBFCAIPI  # nTraj,nTS,nCh,MB,/batch_size/

            # Padded=sps_x_dense_vecs(np.conj(np.transpose(P)),DataCCMB)
            Padded = np.conj(np.transpose(P)) * np.reshape(
                DataCCMB, (nTraj, -1))
            Padded = np.reshape(Padded, (H * 2, W * 2, nTSC, nCh, MB, 1))
            Padded = np.transpose(Padded, (2, 3, 4, 5, 0, 1))
            # Padded=np.reshape(Padded,(nTSC,nCh,MB,1,H*2,W*2))
            IFPadded = np.fft.ifft2(Padded, axes=(-2, -1))

            Cropped = IFPadded[:, :, :, :, :H, :
                               W]  # nTS,nCh,MB,/batch_size/,H,W
            CroppedP = np.transpose(
                Cropped, (5, 4, 1, 2, 0, 3))  # H,W,nCh,MB,nTS,/batch_size/
            Real_NUFFTHSig = np.sum(
                CroppedP *
                np.conj(RealSensCCMB[:, :, :, :, np.newaxis, np.newaxis]),
                axis=2)  # H,W,MB,nTS,/batch_size/
            # RealSensCCMB=RealSensCCMB[:,:,:nCh,:] # H,W,nCh,MB
            Real_NUFFTHSig = np.transpose(Real_NUFFTHSig, (4, 0, 1, 3, 2))
            Real_NUFFTHSig = Real_NUFFTHSig * np.conj(
                SN[np.newaxis, :, :, np.newaxis,
                   np.newaxis])  # /batch_size/,H,W,nTS,MB

            RealSensCCMB1D = GT.NP_ConcatRIOn0(
                np.reshape(RealSensCCMB, (-1, 1, 1)))
            Real_feature = RealSensCCMB1D
            RealTSC01D = GT.NP_ConcatRIOn0(np.reshape(RealTSC0, (-1, 1, 1)))
            Real_feature = np.concatenate((Real_feature, RealTSC01D), axis=0)
            WarmStart_ITS1D = GT.NP_ConcatRIOn0(
                np.reshape(WarmStart_ITS, (-1, 1, 1)))
            Real_feature = np.concatenate((Real_feature, WarmStart_ITS1D),
                                          axis=0)
            Real_NUFFTHSig1D = GT.NP_ConcatRIOn0(
                np.reshape(Real_NUFFTHSig, (-1, 1, 1)))
            Real_feature = np.concatenate((Real_feature, Real_NUFFTHSig1D),
                                          axis=0)

            Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

        if myParams.myDict['InputMode'] == 'SPEN_FC':
            ifilename = myParams.myDict['RealDataFN']
            RealData = scipy.io.loadmat(ifilename)
            RealData = RealData['Data']
            # RealData=np.reshape(RealData,)
            #             RealData=RealData
            Real_feature = RealData

        if myParams.myDict['InputMode'] == 'SPEN_Local':
            ifilename = myParams.myDict['RealDataFN']
            RealData = scipy.io.loadmat(ifilename)
            RealData = RealData['Data']
            #             RealData=RealData
            Real_feature = RealData

        if False:
            if RealData.ndim == 2:
                RealData = RealData.reshape(
                    (RealData.shape[0], RealData.shape[1], 1, 1))
            if RealData.ndim == 3:
                RealData = RealData.reshape(
                    (RealData.shape[0], RealData.shape[1], RealData.shape[2],
                     1))

        if myParams.myDict['InputMode'] == 'RegridTry1' or myParams.myDict[
                'InputMode'] == 'RegridTry2':
            ifilename = myParams.myDict['RealDataFN']
            RealData = scipy.io.loadmat(ifilename)
            RealData = RealData['Data']
            # FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
            FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
            NMapCR = FullData['NMapCR']

            batch_size = myParams.myDict['batch_size']

            Real_feature = np.reshape(RealData[0], [RealData.shape[1]])
            Real_feature = np.take(Real_feature, NMapCR)
            Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

        if myParams.myDict['InputMode'] == 'RegridTry3' or myParams.myDict[
                'InputMode'] == 'RegridTry3M' or myParams.myDict[
                    'InputMode'] == 'RegridTry3F' or myParams.myDict[
                        'InputMode'] == 'RegridTry3FMB' or myParams.myDict[
                            'InputMode'] == 'RegridTry3FME':
            ifilename = myParams.myDict['RealDataFN']
            RealData = scipy.io.loadmat(ifilename)
            RealData = RealData['Data']
            batch_size = myParams.myDict['batch_size']
            nTraj = myParams.myDict['nTraj']
            RealDatancc = myParams.myDict['RealDatancc']
            nccInData = myParams.myDict['nccInData']

            # RealData=RealData
            # RealData=np.reshape(RealData,[batch_size,RealDatancc,nTraj,2])
            # RealData=RealData[:,0:nccInData,:,:]
            # RealData=np.reshape(RealData,[batch_size,nTraj,RealDatancc,2])
            # RealData=RealData[:,:,0:nccInData,:]
            # RealData=np.reshape(RealData,[batch_size,-1])
            RealData = RealData[0, :]
            RealData = np.tile(RealData, (batch_size, 1))
            Real_feature = np.reshape(
                RealData, [RealData.shape[0], RealData.shape[1], 1, 1])

        Real_dictOut = {td.gene_minput: Real_feature}

    # LearningDecayFactor=np.power(2,(-1/FLAGS.learning_rate_half_life))
    # Per-batch decay factor: halves lrval every `learning_rate_half_life` batches
    # (overridden below by a wall-clock schedule when the half-life is < 1000, i.e. in minutes)
    learning_rate_half_life = myParams.myDict['learning_rate_half_life']
    LearningDecayFactor = np.power(2, (-1 / learning_rate_half_life))

    # train_time=FLAGS.train_time
    train_time = myParams.myDict['train_time']

    QuickFailureTimeM = myParams.myDict['QuickFailureTimeM']
    QuickFailureThresh = myParams.myDict['QuickFailureThresh']

    summary_period = myParams.myDict['summary_period']  # in Minutes
    checkpoint_period = myParams.myDict['checkpoint_period']  # in Minutes

    DiscStartMinute = myParams.myDict['DiscStartMinute']

    gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)

    if myParams.myDict['ShowRealData'] > 0:
        gene_RealOutput = td.sess.run(td.gene_moutput, feed_dict=Real_dictOut)
        gene_output[0] = gene_RealOutput[0]

    Asuffix = 'out_%06.4f' % (gene_loss)
    _summarize_progress(td, test_label, gene_output, batch, Asuffix)

    print("Adding to saver:")
    var_listX = td.gene_var_list
    var_listX = [v for v in var_listX if "Bank" not in v.name]
    #     var_listX = tf.sort(var_listX)
    for line in var_listX:
        print("Adding " + line.name + '           ' +
              str(line.shape.as_list()))
    print("Saver var list end")

    saver = tf.train.Saver(var_listX)
    # _save_checkpoint(td, batch,G_LossV,saver)

    tf.get_default_graph().finalize()

    while not done:
        batch += 1
        gene_loss = disc_real_loss = disc_fake_loss = -1.234

        # elapsed = int(time.time() - start_time)/60
        CurTime = time.time()
        elapsed = (time.time() - start_time) / 60

        # Update learning rate
        lrval *= LearningDecayFactor
        if (learning_rate_half_life < 1000):  # in minutes
            lrval = learning_rate_start * np.power(
                0.5, elapsed / learning_rate_half_life)

        #print("batch %d gene_l1_factor %f' " % (batch,FLAGS.gene_l1_factor))
        # if batch==200:
        if elapsed > DiscStartMinute:
            FLAGS.gene_l1_factor = 0.9

        RunDiscriminator = FLAGS.gene_l1_factor < 0.999

        feed_dict = {td.learning_rate: lrval}
        if RunDiscriminator:
            ops = [
                td.gene_minimize, td.disc_minimize, td.gene_loss,
                td.disc_real_loss, td.disc_fake_loss
            ]
            _, _, gene_loss, disc_real_loss, disc_fake_loss = td.sess.run(
                ops, feed_dict=feed_dict)
        else:
            ops = [
                td.gene_minimize, td.gene_loss, td.MoreOut, td.MoreOut2,
                td.MoreOut3
            ]
            _, gene_loss, MoreOut, MoreOut2, MoreOut3 = td.sess.run(
                ops, feed_dict=feed_dict)

        G_LossV[batch] = gene_loss

        # ggg: Force phase only var
        # VR = [v for v in tf.global_variables() if v.name == "gene/GEN_L004/add_Mult2DMCyCSharedOverFeat_weightR:0"][0]
        # VI = [v for v in tf.global_variables() if v.name == "gene/GEN_L004/add_Mult2DMCyCSharedOverFeat_weightI:0"][0]
        # VRX=td.sess.run(VR);
        # VIX=td.sess.run(VI);
        # VC=VRX+1J*VIX
        # Norm=np.abs(VC)
        # Norm[Norm == 0] = 0.00001
        # VRX=VRX/Norm
        # VIX=VIX/Norm
        # VR.load(VRX, td.sess)
        # VI.load(VIX, td.sess)

        # VR = [v for v in tf.global_variables() if v.name == "gene/GEN_L005/add_Mult2DMCxCSharedOverFeat_weightR:0"][0]
        # VI = [v for v in tf.global_variables() if v.name == "gene/GEN_L005/add_Mult2DMCxCSharedOverFeat_weightI:0"][0]
        # VRX=td.sess.run(VR);
        # VIX=td.sess.run(VI);
        # VC=VRX+1J*VIX
        # Norm=np.abs(VC)
        # Norm[Norm == 0] = 0.00001
        # VRX=VRX/Norm
        # VIX=VIX/Norm
        # VR.load(VRX, td.sess)
        # VI.load(VIX, td.sess)

        # VR = [v for v in tf.global_variables() if v.name == "gene/GEN_L004/einsum_weightR:0"][0]
        # VI = [v for v in tf.global_variables() if v.name == "gene/GEN_L004/einsum_weightI:0"][0]
        # VRX=td.sess.run(VR);
        # VIX=td.sess.run(VI);
        # HmngWnd=np.power(np.hamming(98),1)
        # HmngWnd=np.reshape(HmngWnd,[98,1,1])
        # VC=VRX +1j*VIX

        # FVC=GT.gfft(VC,dim=0)
        # FVC=FVC*HmngWnd
        # VC=GT.gifft(FVC,dim=0)
        # VYR=np.real(VC)
        # VYI=np.imag(VC)
        # VR.load(VYR, td.sess)
        # VI.load(VYI, td.sess)

        if batch % 10 == 0:

            # pdb.set_trace()

            # Show we are alive
            #print('Progress[%3d%%], ETA[%4dm], Batch [%4d], G_Loss[%3.3f], D_Real_Loss[%3.3f], D_Fake_Loss[%3.3f]' %
            #      (int(100*elapsed/train_time), train_time - int(elapsed), batch, gene_loss, disc_real_loss, disc_fake_loss))

            print(
                'Progress[%3d%%], ETA[%4dm], Batch [%4d], G_Loss[%3.3f], D_Real_Loss[%3.3f], D_Fake_Loss[%3.3f], MoreOut[%3.3f, %3.3f, %3.3f]'
                % (int(100 * elapsed / train_time), train_time - int(elapsed),
                   batch, gene_loss, disc_real_loss, disc_fake_loss, MoreOut,
                   MoreOut2, MoreOut3))

            # VLen=td.gene_var_list.__len__()
            # for i in range(0, VLen):
            #     print(td.gene_var_list[i].name);

            # print(VRX.dtype)
            # print(VRX)
            # exit()
            # var_23 = [v for v in tf.global_variables() if v.name == "gene/GEN_L020/C2D_weight:0"][0]
            # tmp=td.sess.run(td.gene_var_list[i])
            # v.load([2, 3], td.sess)

            if np.isnan(gene_loss):
                print('NAN!!')
                done = True

            # ggg: quick failure test
            if elapsed > QuickFailureTimeM:
                if gene_loss > QuickFailureThresh:
                    print('Quick failure!!')
                    done = True
                else:
                    QuickFailureTimeM = 10000000

            # Finished?
            current_progress = elapsed / train_time
            if current_progress >= 1.0:
                done = True

            StopFN = '/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/stop.a'
            if os.path.isfile(StopFN):
                print('Stop file used!!')
                done = True
                try:
                    tf.gfile.Remove(StopFN)
                except:
                    pass

            # Update learning rate
            # if batch % FLAGS.learning_rate_half_life == 0:
            #     lrval *= .5

        # if batch % FLAGS.summary_period == 0:
        if (CurTime - last_summary_time) / 60 > summary_period:
            # Show progress with test features
            # feed_dict = {td.gene_minput: test_feature}
            gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)

            if myParams.myDict['ShowRealData'] > 0:
                gene_RealOutput = td.sess.run(td.gene_moutput,
                                              feed_dict=Real_dictOut)
                gene_output[0] = gene_RealOutput[0]

            Asuffix = 'out_%06.4f' % (gene_loss)
            _summarize_progress(td, test_label, gene_output, batch, Asuffix)

            last_summary_time = time.time()

        # if batch % FLAGS.checkpoint_period == 0:
        SaveCheckpoint_ByTime = (CurTime -
                                 last_checkpoint_time) / 60 > checkpoint_period
        CheckpointFN = '/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/save.a'
        SaveCheckPointByFile = os.path.isfile(CheckpointFN)
        if SaveCheckPointByFile:
            tf.gfile.Remove(CheckpointFN)

        if SaveCheckpoint_ByTime or SaveCheckPointByFile:
            last_checkpoint_time = time.time()
            # Save checkpoint
            _save_checkpoint(td, batch, G_LossV, saver)

        RunOnAllFN = '/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/RunOnAll.a'
        RunOnAllFNByFile = os.path.isfile(RunOnAllFN)
        if RunOnAllFNByFile:
            tf.gfile.Remove(RunOnAllFN)

            for r in range(1, 81):
                ifilenamePrefix = myParams.myDict['LoadAndRunOnData_Prefix']
                #                 ifilename=ifilenamePrefix +  f'{r:02}' + '.mat'
                ifilename = ifilenamePrefix + '%02d.mat' % (r)
                RealData = scipy.io.loadmat(ifilename)
                RealData = RealData['Data']

                if RealData.ndim == 2:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1], 1, 1))
                if RealData.ndim == 3:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1],
                         RealData.shape[2], 1))

                Real_feature = RealData

                if myParams.myDict[
                        'InputMode'] == 'RegridTry1' or myParams.myDict[
                            'InputMode'] == 'RegridTry2':
                    # FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
                    FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
                    NMapCR = FullData['NMapCR']

                    batch_size = myParams.myDict['batch_size']

                    Real_feature = np.reshape(RealData[0], [RealData.shape[1]])
                    Real_feature = np.take(Real_feature, NMapCR)
                    Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

                Real_dictOut = {td.gene_minput: Real_feature}

                gene_RealOutput = td.sess.run(td.gene_moutput,
                                              feed_dict=Real_dictOut)

                OnRealData = {}
                OnRealDataM = gene_RealOutput
                #                 filenamex = 'OnRealData' + f'{r:02}' + '.mat'
                filenamex = 'OnRealData' + '%02d.mat' % (r)

                LoadAndRunOnData_OutP = myParams.myDict[
                    'LoadAndRunOnData_OutP']
                filename = os.path.join(LoadAndRunOnData_OutP, filenamex)
                OnRealData['x'] = OnRealDataM
                scipy.io.savemat(filename, OnRealData)

            print('Saved recon of real data')

    _save_checkpoint(td, batch, G_LossV, saver)

    print('Finished training!')
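
The learning-rate schedule in train_model halves lrval every learning_rate_half_life batches through the per-batch factor 2^(-1/half_life), and switches to a wall-clock schedule, learning_rate_start * 0.5^(elapsed_minutes/half_life), when the half-life is below 1000. A small numeric check of the per-batch factor (the parameter values are illustrative, not from the original config):

import numpy as np

learning_rate_start = 0.001      # illustrative value; the real one comes from myParams
learning_rate_half_life = 500    # interpreted as batches in this branch

decay = np.power(2, -1.0 / learning_rate_half_life)
lr = learning_rate_start
for _ in range(learning_rate_half_life):
    lr *= decay

# After exactly one half-life worth of batches the rate has halved
assert np.isclose(lr, learning_rate_start / 2)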
Example n. 13
def _train():

    # LoadAndRunOnData=False
    LoadAndRunOnData = myParams.myDict['LoadAndRunOnData'] > 0
    if LoadAndRunOnData:
        # Setup global tensorflow state
        sess, summary_writer = setup_tensorflow()

        # Prepare directories
        # filenames = prepare_dirs(delete_train_dir=False)

        # Setup async input queues
        # features, labels = srez_input.setup_inputs(sess, filenames)
        features, labels = srez_input.setup_inputs(sess, 1)

        # Create and initialize model
        [gene_minput, gene_moutput,
         gene_output, gene_var_list,
         disc_real_output, disc_fake_output, disc_var_list] = \
                srez_modelBase.create_model(sess, features, labels)

        # Restore variables from checkpoint
        print("Adding to saver:")
        var_listX = gene_var_list
        var_listX = [v for v in var_listX if "Bank" not in v.name]
        for line in var_listX:
            print("Adding " + line.name + '           ' +
                  str(line.shape.as_list()))
        print("Saver var list end")

        saver = tf.train.Saver(var_listX)
        # saver = tf.train.Saver()
        filename = 'checkpoint_new'
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS2_dataNeighborhoodRCB0__2018-06-08_16-17-56_checkpoint', filename)
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS2_dataNeighborhoodRCB0__2018-06-09_19-44-17_checkpoint', filename)
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS__2018-06-29_10-39-13_checkpoint', filename)
        checkpointP = myParams.myDict['LoadAndRunOnData_checkpointP']
        filename = os.path.join(checkpointP, filename)

        saver.restore(sess, filename)

        if myParams.myDict['Mode'] == 'RegridTry1' or myParams.myDict[
                'Mode'] == 'RegridTry1C' or myParams.myDict[
                    'Mode'] == 'RegridTry1C2' or myParams.myDict[
                        'Mode'] == 'RegridTry1C2_TS' or myParams.myDict[
                            'Mode'] == 'RegridTry1C2_TS2':
            FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
            NMapCR = FullData['NMapCR']

        for r in range(1, myParams.myDict['HowManyToRun']):
            # ifilename='/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RealData/b_Ben14May_Sli5_r' +  f'{r:02}' + '.mat'
            # ifilename='/media/a/DATA/14May18/Ben/meas_MID109_gBP_VD11_U19_4min_FID17944/RealData/Sli11_r' +  f'{r:02}' + '.mat'
            if myParams.myDict['InputMode'] == 'Cart3SB':
                # print('Loaded SensMaps, Shape %d %d %d %d' % (SensMapsSz[0],SensMapsSz[1],SensMapsSz[2],SensMapsSz[3]))
                print('Cart3SB running on real data %d' % r)
                # feature  shape: (1048576, 1, 1) <dtype: 'float32'>
                batch_size = myParams.myDict['batch_size']
                # RealData=np.zeros((batch_size,1048576, 1, 1),np.float32)
                # RealData=np.zeros((batch_size,640000, 1, 1),np.float32)

                # Simulating RealData from ITS, Sens
                MB = GT.getparam('MB')
                TimePoints_ms = GT.getparam('TimePoints_ms')
                nTSC = TimePoints_ms.shape[0]
                nCh = GT.getparam('nccToUse')

                LabelsH = myParams.myDict['LabelsH']
                LabelsW = myParams.myDict['LabelsW']

                H = LabelsH
                W = LabelsW

                SnsFN = '/opt/data/CCSensMaps.mat'
                fS = h5py.File(SnsFN, 'r')
                SensMaps = fS['SensCC']
                SensMaps = SensMaps['real'] + 1j * SensMaps['imag']
                SensMapsSz = SensMaps.shape
                print('r Loaded SensMaps, Shape %d %d %d %d' %
                      (SensMapsSz[0], SensMapsSz[1], SensMapsSz[2],
                       SensMapsSz[3]))
                SensMaps = SensMaps[:, :, :, :nCh]

                NumSensMapsInFile = SensMaps.shape[0]
                # IdxS=15
                for b in range(0, MB):
                    # if b==1:
                    #     IdxB2=tf.random_uniform([1],minval=12,maxval=19,dtype=tf.int32)
                    #     IdxS=IdxS+IdxB2[0]
                    #     IdxS=tf.cond(IdxS[0]>=NumSensMapsInFile, lambda: IdxS-NumSensMapsInFile, lambda: IdxS)

                    # Sens=np.squeeze(SensMaps[IdxS,:,:,:],axis=0)
                    Sens = (SensMaps[15, :, :, :])

                    Sens = Sens[:H, :W, :nCh]

                    # Sens = tf.image.random_flip_left_right(Sens)
                    # Sens = tf.image.random_flip_up_down(Sens)
                    # uS=tf.random_uniform([1])
                    # Sens=tf.cond(uS[0]<0.5, lambda: tf.identity(Sens), lambda: tf.image.rot90(Sens))
                    SensMsk = GT.NP_addDim(
                        np.sum(np.abs(Sens), axis=2) > 0).astype(np.complex64)
                    Sens = GT.NP_addDim(Sens)

                    if b == 0:
                        SensMB = Sens
                        SensMskMB = SensMsk

                    # else:
                    #     SensMB=tf.concat([SensMB,Sens],axis=3) #     SensMB H W nCh MB
                    #     SensMskMB=tf.concat([SensMskMB,SensMsk],axis=2) #     SensMskMB H W MB

                # nToLoad=myParams.myDict['nToLoad']
                # LoadAndRunOnData=myParams.myDict['LoadAndRunOnData']>0
                # if LoadAndRunOnData:
                nToLoad = 300

                print('r loading images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
                GREBaseP = '/opt/data/'
                SFN = GREBaseP + 'All_Orientation-0x.mat'
                f = h5py.File(SFN, 'r')
                I = f['CurSetAll'][0:nToLoad]
                print('r Loaded images ' + time.strftime("%Y-%m-%d %H:%M:%S"))

                SendTSCest = GT.getparam('SendTSCest') > 0
                HamPow = GT.getparam('HamPow')

                # def TFexpix(X): return tf.exp(tf.complex(tf.zeros_like(X),X))
                def NPexpix(X):
                    return np.exp(1j * X)

                for b in range(0, MB):
                    # TFI = tf.constant(np.int16(I))
                    # Idx=tf.random_uniform([1],minval=0,maxval=I.shape[0],dtype=tf.int32)
                    Idx = 133

                    Data4 = (I[Idx, :, :, :])
                    # Data4=tf.squeeze(tf.slice(I,[Idx[0],0,0,0],[1,-1,-1,-1]),axis=0)
                    # Data4 = tf.image.random_flip_left_right(Data4)
                    # Data4 = tf.image.random_flip_up_down(Data4)

                    # u1=tf.random_uniform([1])
                    # Data4=tf.cond(u1[0]<0.5, lambda: tf.identity(Data4), lambda: tf.image.rot90(Data4))

                    # Data4 = tf.random_crop(Data4, [H, W, 4])
                    # Data4 = tf.random_crop(Data4, [:H, :W, 4])
                    Data4 = Data4[:H, :W, :]

                    # M=tf.slice(Data4,[0,0,0],[-1,-1,1])
                    # Ph=tf.slice(Data4,[0,0,1],[-1,-1,1])
                    # feature=tf.cast(M,tf.complex64)*TFexpix(Ph)
                    M = Data4[:, :, 0]
                    Ph = Data4[:, :, 1]
                    feature = M.astype(np.complex64) * NPexpix(Ph)

                    feature = GT.NP_addDim(feature) * SensMskMB[:, :, b:b + 1]

                    T2S_ms = Data4[:, :, 2]
                    # T2S_ms = tf.where( T2S_ms<1.5, 10000 * tf.ones_like( T2S_ms ), T2S_ms )
                    T2S_ms[T2S_ms < 1.5] = 10000

                    B0_Hz = Data4[:, :, 3]
                    # B0_Hz=M*0

                    # T2S_ms = tf.where( tf.is_nan(T2S_ms), 10000 * tf.ones_like( T2S_ms ), T2S_ms )
                    T2S_ms[np.isnan(T2S_ms)] = 10000
                    # B0_Hz = tf.where( tf.is_nan(B0_Hz), tf.zeros_like( B0_Hz ), B0_Hz )
                    B0_Hz[np.isnan(B0_Hz)] = 0

                    if SendTSCest:
                        # HamPowA=10
                        HamPowA = HamPow
                        HamA = np.roll(np.hamming(H), np.int32(H / 2))
                        HamA = np.power(HamA, HamPowA)
                        HamXA = np.reshape(HamA, (1, H, 1))
                        HamYA = np.reshape(HamA, (1, 1, W))

                        B0_Hz_Smoothed = np.transpose(
                            GT.NP_addDim(B0_Hz.astype(np.complex64)),
                            (2, 0, 1))
                        B0_Hz_Smoothed = np.fft.fft2(B0_Hz_Smoothed)
                        B0_Hz_Smoothed = B0_Hz_Smoothed * HamXA
                        B0_Hz_Smoothed = B0_Hz_Smoothed * HamYA
                        B0_Hz_Smoothed = np.fft.ifft2(B0_Hz_Smoothed)
                        B0_Hz_Smoothed = np.transpose(B0_Hz_Smoothed,
                                                      (1, 2, 0))
                        B0_Hz_Smoothed = np.real(B0_Hz_Smoothed)

                        TSCest = np.exp(1j * 2 * np.pi *
                                        (B0_Hz_Smoothed * TimePoints_ms /
                                         1000).astype(np.complex64))
                        # TSCest=np.ones(TSCest.shape).astype(np.complex64)
                        print('TSCest shape: ' + str(TSCest.shape))
                        # TSCest=TSCest*0+1
                        # print('TSCest shape: ' + str(TSCest.shape))
                        # print('reducing B0')
                        # print('B0_Hz shape: ' + str(B0_Hz.shape))
                        # print('B0_Hz_Smoothed shape: ' + str(B0_Hz_Smoothed.shape))
                        # B0_Hz=B0_Hz-np.squeeze(B0_Hz_Smoothed)
                        # print('B0_Hz shape: ' + str(B0_Hz.shape))

                    # urand_ms=tf.random_uniform([1])*12
                    # urand_sec=(tf.random_uniform([1])*2-1)*3/1000

                    # feature=feature*tf.cast(tf.exp(-urand_ms/T2S_ms),tf.complex64)
                    # feature=feature*TFexpix(2*np.pi*B0_Hz*urand_sec)

                    mx = M.max()
                    mx = np.maximum(mx, 1)
                    mx = mx.astype(np.complex64)

                    feature = feature / mx

                    CurIWithPhase = feature

                    TSCM = np.exp(-TimePoints_ms / GT.NP_addDim(T2S_ms))
                    TSCP = np.exp(1j * 2 * np.pi *
                                  (GT.NP_addDim(B0_Hz) * TimePoints_ms /
                                   1000).astype(np.complex64))
                    TSC = TSCM.astype(np.complex64) * TSCP

                    ITSbase = CurIWithPhase * TSC  # ITSbase is H,W,nTSC

                    TSC = GT.NP_addDim(TSC)
                    ITSbase = GT.NP_addDim(ITSbase)
                    if b == 0:
                        CurIWithPhaseMB = CurIWithPhase
                        TSCMB = TSC
                        ITSbaseMB = ITSbase
                        if SendTSCest:
                            TSCest = GT.NP_addDim(TSCest)
                            TSCMBest = TSCest
                    # else:
                    #     CurIWithPhaseMB=tf.concat([CurIWithPhaseMB,CurIWithPhase],axis=2) #     CurIWithPhaseMB H W MB
                    #     TSCMB=tf.concat([TSCMB,TSC],axis=3) #     TSCMB H W nTSC MB
                    #     ITSbaseMB=tf.concat([ITSbaseMB,ITSbase],axis=3) #     ITSbaseMB H W nTSC MB
                    #     if SendTSCest:
                    #         TSCMBest=tf.stack([TSCMBest,TSCest],axis=3)
                print('r ok 2')
                ITS_P = np.transpose(
                    GT.NP_addDim(ITSbaseMB),
                    (4, 0, 1, 2, 3))  # /batch_size/,H,W,nTSC,MB

                Msk3 = np.zeros((H, W, nTSC, 1, 1, 1))

                PEShifts = GT.getparam('PEShifts')
                PEJump = GT.getparam('PEJump')
                print('r Using PEShifts')
                for i in range(nTSC):
                    Msk3[PEShifts[i]::PEJump, :, i, :, :, :] = 1

                Msk3 = np.complex64(Msk3)
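                # Editorial sketch: Msk3 is an interleaved Cartesian sampling mask -- time
                # segment i keeps every PEJump-th phase-encode row starting at PEShifts[i],
                # so the segments tile k-space between them.  With hypothetical PEJump=3 and
                # PEShifts=[0, 1, 2]:
                #
                #   msk = np.zeros((6, 1, 3))
                #   for i in range(3):
                #       msk[i::3, :, i] = 1      # segment i samples rows {i, i+3}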

                # GT.setparam('CartMask',Msk3)

                Sens6 = SensMB[:, :, np.newaxis, :, :,
                               np.newaxis]  # H,W,/nTS/,nCh,MB,/batch_size/

                # AHA_ITS=GT.Cartesian_OPHOP_ITS_MB(ITS_P,Sens6,Msk3)

                ITS = np.transpose(ITSbaseMB, (0, 3, 2, 1))  # -> H, MB, nTSC, W
                ITS = np.reshape(ITS, (H, W * nTSC * MB, 1))
                ITS_RI = GT.NP_ConcatRIOn2(ITS)

                Sensc = SensMB
                Sens1D = GT.NP_ConcatRIOn0(np.reshape(Sensc, (-1, 1, 1)))
                feature = Sens1D

                AHA_ITS = GT.NP_Cartesian_OPHOP_ITS_MB(ITS_P, Sens6, Msk3)
                # new simpler approach
                if SendTSCest:
                    TSCMBest_P = np.transpose(
                        GT.NP_addDim(TSCMBest),
                        (4, 0, 1, 2, 3))  # /batch_size/,H,W,nTSC,MB
                    AHA_ITS = AHA_ITS * np.conj(TSCMBest_P)

                #         send AHA_ITS
                AHA_ITS_1D = GT.NP_ConcatRIOn0(np.reshape(AHA_ITS, (-1, 1, 1)))
                feature = np.concatenate((feature, AHA_ITS_1D), axis=0)
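                # Editorial sketch: the network input is one long 1-D vector obtained by
                # stacking real and imaginary parts of (a) the coil sensitivities and
                # (b) A^H A applied to the time-segmented images (plus, optionally, the TSC
                # estimate appended below).  With ri() as a hypothetical stand-in for
                # GT.NP_ConcatRIOn0:
                #
                #   ri = lambda z: np.concatenate((np.real(z), np.imag(z)), axis=0)
                #   feature = np.concatenate((ri(sens_1d), ri(aha_1d)), axis=0)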

                if SendTSCest:
                    TSCest1D = GT.NP_ConcatRIOn0(
                        np.reshape(TSCMBest_P, (-1, 1, 1)))
                    feature = np.concatenate((feature, TSCest1D), axis=0)

                RealData = np.tile(feature, (batch_size, 1, 1, 1))

                # End simulating RealData
                Real_feature = RealData
            else:
                ifilenamePrefix = myParams.myDict['LoadAndRunOnData_Prefix']
                #             ifilename=ifilenamePrefix +  f'{r:02}' + '.mat'
                ifilename = ifilenamePrefix + '%02d.mat' % (r)
                RealData = scipy.io.loadmat(ifilename)
                RealData = RealData['Data']

                if RealData.ndim == 2:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1], 1, 1))
                if RealData.ndim == 3:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1],
                         RealData.shape[2], 1))

                Real_feature = RealData

                # if myParams.myDict['Mode'] == 'RegridTry1' or myParams.myDict['Mode'] == 'RegridTry1C' or myParams.myDict['Mode'] == 'RegridTry1C2' or myParams.myDict['Mode'] == 'RegridTry1C2_TS' or myParams.myDict['Mode'] == 'RegridTry1C2_TS2':
                #     batch_size=myParams.myDict['batch_size']

                #     Real_feature=np.reshape(RealData[0],[RealData.shape[1]])
                #     Real_feature=np.take(Real_feature,NMapCR)
                #     Real_feature=np.tile(Real_feature, (batch_size,1,1,1))

            if myParams.myDict['InputMode'] == 'RegridTry1' or myParams.myDict[
                    'InputMode'] == 'RegridTry2':
                # FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
                FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
                NMapCR = FullData['NMapCR']

                batch_size = myParams.myDict['batch_size']

                Real_feature = np.reshape(RealData[0], [RealData.shape[1]])
                Real_feature = np.take(Real_feature, NMapCR)
                Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

            Real_dictOut = {gene_minput: Real_feature}

            gene_RealOutput = sess.run(gene_moutput, feed_dict=Real_dictOut)

            OnRealData = {}
            OnRealDataM = gene_RealOutput
            #             filenamex = 'OnRealData' + f'{r:02}' + '.mat'
            filenamexBase = 'OnRealData' + '%02d' % (r)
            filenamex = filenamexBase + '.mat'

            LoadAndRunOnData_OutP = myParams.myDict['LoadAndRunOnData_OutP']
            filename = os.path.join(LoadAndRunOnData_OutP, filenamex)
            OnRealData['x'] = OnRealDataM
            scipy.io.savemat(filename, OnRealData)

            image = np.sqrt(
                np.square(OnRealDataM[0, -H:, :(W * 3), 0]) +
                np.square(OnRealDataM[0, -H:, :(W * 3), 1]))
            filenamep = filenamexBase + '.png'
            filename = os.path.join(LoadAndRunOnData_OutP, filenamep)
            imageio.imwrite(filename, image)
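            # Editorial sketch: the saved PNG is the magnitude image formed from the two
            # real/imaginary output channels over the last H rows and first 3*W columns;
            # an equivalent formulation (out standing in for OnRealDataM):
            #
            #   mag = np.abs(out[0, -H:, :W * 3, 0] + 1j * out[0, -H:, :W * 3, 1])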

        print('Saved recon of real data')
        exit()

    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    all_filenames = prepare_dirs(delete_train_dir=True)

    # Separate training and test sets
    #train_filenames = all_filenames[:-FLAGS.test_vectors]
    train_filenames = all_filenames
    #test_filenames  = all_filenames[-FLAGS.test_vectors:]

    # TBD: Maybe download dataset here

    #pdb.set_trace()

    # ggg Signal Bank stuff:
    if myParams.myDict['BankSize'] > 0:
        if myParams.myDict['InputMode'] == 'RegridTry3FMB':
            BankSize = myParams.myDict['BankSize'] * 2

            # BankInit=np.zeros([BankSize,myParams.myDict['DataH'],1,1])
            # LBankInit=np.zeros([BankSize,myParams.myDict['LabelsH'],myParams.myDict['LabelsW'], 2])
            with tf.variable_scope("aaa"):
                Bank = tf.get_variable(
                    "Bank",
                    shape=[BankSize, myParams.myDict['DataH'], 1, 1],
                    dtype=tf.float32,
                    trainable=False)
                LBank = tf.get_variable("LBank",
                                        shape=[
                                            BankSize,
                                            myParams.myDict['LabelsH'],
                                            myParams.myDict['LabelsW'], 2
                                        ],
                                        dtype=tf.float32,
                                        trainable=False)
                # LBank=tf.get_variable("LBank",initializer=tf.cast(LBankInit, tf.float32),dtype=tf.float32,trainable=False)
        else:
            BankSize = myParams.myDict['BankSize']

            BankInit = np.zeros([BankSize, myParams.myDict['DataH'], 1, 1])
            LBankInit = np.zeros([
                BankSize, myParams.myDict['LabelsH'],
                myParams.myDict['LabelsW'], 2
            ])
            with tf.variable_scope("aaa"):
                # Bank=tf.get_variable("Bank",initializer=tf.cast(BankInit, tf.float32),dtype=tf.float32)
                Bank = tf.get_variable(
                    "Bank",
                    shape=[BankSize, myParams.myDict['DataH'], 1, 1],
                    dtype=tf.float32,
                    trainable=False)
                LBank = tf.get_variable("LBank",
                                        shape=[
                                            BankSize,
                                            myParams.myDict['LabelsH'],
                                            myParams.myDict['LabelsW'], 2
                                        ],
                                        dtype=tf.float32,
                                        trainable=False)
                # LBank=tf.get_variable("LBank",initializer=tf.cast(LBankInit, tf.float32),dtype=tf.float32)

        init_new_vars_op = tf.variables_initializer([Bank, LBank])
        sess.run(init_new_vars_op)
    # ggg end Signal Bank stuff:

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(
        sess, train_filenames)
    # test_features, test_labels = srez_input.setup_inputs(sess, train_filenames,TestStuff=True)
    test_features = train_features
    test_labels = train_labels
    #test_features,  test_labels  = srez_input.setup_inputs(sess, test_filenames)

    print('starting ' + time.strftime("%Y-%m-%d %H:%M:%S"))
    print('train_features %s' % (train_features))
    print('train_labels %s' % (train_labels))

    # Add some noise during training (think denoising autoencoders)
    noise_level = myParams.myDict['noise_level']
    AddNoise = noise_level > 0.0
    if AddNoise:
        noisy_train_features = train_features + tf.random_normal(
            train_features.get_shape(), stddev=noise_level)
    else:
        noisy_train_features = train_features
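    # Editorial sketch: with noise_level > 0 the generator is trained on corrupted
    # inputs, in the spirit of a denoising autoencoder; the pattern is simply
    # (x standing in for any input tensor):
    #
    #   x_noisy = x + tf.random_normal(x.get_shape(), stddev=noise_level)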

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
            srez_modelBase.create_model(sess, noisy_train_features, train_labels)

    # gene_VarNamesL=[];
    # for line in gene_var_list: gene_VarNamesL.append(line.name+'           ' + str(line.shape.as_list()))
    # gene_VarNamesL.sort()

    # for line in gene_VarNamesL: print(line)
    # # var_23 = [v for v in tf.global_variables() if v.name == "gene/GEN_L020/C2D_weight:0"][0]

    # for line in sess.graph.get_operations(): print(line)
    # Gen3_ops=[]
    # for line in sess.graph.get_operations():
    #     if 'GEN_L003' in line.name:
    #         Gen3_ops.append(line)

    #     # LL=QQQ.outputs[0]

    # for x in Gen3_ops: print(x.name +'           ' + str(x.outputs[0].shape))

    # GenC2D_ops= [v for v in sess.graph.get_operations()]

    # GenC2D_ops= [v for v in tf.get_operations() if "weight" in v.name]
    # GenC2D_ops= [v for v in GenC2D_ops if "C2D" in v.name]
    # for x in GenC2D_ops: print(x.name +'           ' + str(x.outputs[0].shape))

    # for x in GenC2D_ops: print(x.name)

    AEops = [
        v for v in sess.graph.get_operations()
        if "AE" in v.name and not ("_1/" in v.name)
    ]
    # AEops = [v for v in td.sess.graph.get_operations() if "Pixel" in v.name and not ("_1/" in v.name) and not ("opti" in v.name) and not ("Assign" in v.name) and not ("read" in v.name) and not ("Adam" in v.name)]
    AEouts = [v.outputs[0] for v in AEops]
    varsForL1 = AEouts
    # varsForL1=AEouts[0:-1]
    # varsForL1=AEouts[1:]

    # for line in sess.graph.get_operations():
    #     if 'GEN_L003' in line.name:
    #         Gen3_ops.append(line)

    #     # LL=QQQ.outputs[0]

    # for x in Gen3_ops: print(x.name +'           ' + str(x.outputs[0].shape))

    print("Vars for l2 loss:")
    varws = [
        v for v in tf.global_variables()
        if (("weight" in v.name) or ("ConvNet" in v.name))
    ]
    # Narrow the list step by step: keep C2D weights, then drop discriminator and bias vars.
    varsForL2 = [v for v in varws if "C2D" in v.name]
    varsForL2 = [v for v in varsForL2 if "disc" not in v.name]
    varsForL2 = [v for v in varsForL2 if "bias" not in v.name]
    for line in varsForL2:
        print(line.name + '           ' + str(line.shape.as_list()))

    print("Vars for Phase-only loss:")
    varws = [v for v in tf.global_variables() if "weight" in v.name]
    varsForPhaseOnly = [v for v in varws if "SharedOverFeat" in v.name]
    for line in varsForPhaseOnly:
        print(line.name + '           ' + str(line.shape.as_list()))

    # pdb.set_trace()

    gene_loss, MoreOut, MoreOut2, MoreOut3 = srez_modelBase.create_generator_loss(
        disc_fake_output, gene_output, train_features, train_labels, varsForL1,
        varsForL2, varsForPhaseOnly)
    disc_real_loss, disc_fake_loss = \
                     srez_modelBase.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')
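    # Editorial note: standard GAN bookkeeping -- the discriminator loss is the sum of
    # its loss on real and on generated batches,
    #
    #   disc_loss = disc_real_loss + disc_fake_loss
    #
    # while the generator loss above is also handed the varsForL1 / varsForL2 /
    # varsForPhaseOnly lists, presumably for its extra regularization terms.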

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_modelBase.create_optimizers(gene_loss, gene_var_list, disc_loss, disc_var_list)

    # Train model
    train_data = TrainData(locals())

    #pdb.set_trace()
    # ggg: to restore session
    RestoreSession = False
    if RestoreSession:
        saver = tf.train.Saver()
        filename = 'checkpoint_new'
        filename = os.path.join(myParams.myDict['checkpoint_dir'], filename)
        saver.restore(sess, filename)

    srez_train.train_model(train_data)
Esempio n. 14
0
import os
import sys

# sys.path.insert(0, '/media/a/H2/home/a/TF/')
sys.path.insert(0, '/opt/data/TF/')

import time

import GTools as GT

# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# HomeA='/home/deni/'
HomeA = GT.getHome()

sys.path.insert(0, HomeA + 'TF/')

# DatasetsBase='/home/deni/'
DatasetsBase = GT.getDatasetsBase()

os.chdir(HomeA + 'TF/srezN')
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# os.environ['CUDA_VISIBLE_DEVICES'] = ''

import pdb

# import srez_demo
import srez_input
import srez_model
Esempio n. 15
0
import os
import time

import GTools as GT

# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# HomeA=GT.getHome()
HomeA = os.getcwd()
# os.chdir(HomeA + 'TF/srez')
ParamFN = HomeA + '/Params.txt'

# DatasetsBase='/home/deni/'
DatasetsBase = GT.getDatasetsBase()

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# os.environ['CUDA_VISIBLE_DEVICES'] = ''

import pdb

# import srez_demo
import srez_input
import srez_model
import srez_train
import srez_modelBase

import os.path
import random
import numpy as np
Esempio n. 16
0
def _generator_model(sess, features, labels, channels):
    # Upside-down all-convolutional resnet

    mapsize = 3
    mapsize = myParams.myDict['MapSize']
    res_units = [256, 128, 96]

    old_vars = tf.global_variables()

    # See Arxiv 1603.05027
    model = Model('GEN', features)

    # H=FLAGS.LabelsH;
    # W=FLAGS.LabelsW;
    H = myParams.myDict['LabelsH']
    W = myParams.myDict['LabelsW']
    channelsOut = myParams.myDict['channelsOut']

    batch_size = myParams.myDict['batch_size']

    DataH = myParams.myDict['DataH']

    print("_generator_model")
    print("%d %d %d" % (H, W, channels))

    if myParams.myDict['NetMode'] == 'SPEN_Local':
        print("SPEN_Local mode")

        model.add_Split4thDim(2)  # now (16, H, W, HNeighbors, 2)
        model.add_PixelwiseMultC(1)  #,NamePrefix='MapsForMat')

        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SPEN_FC':
        print("SPEN_FC mode")
        model.add_5thDim()
        model.add_Permute45()

        model.add_Mult2DMCxC(H, 1)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SMASH1DFTxyC_YCC':
        print("SMASH1DFTxyC_YCC mode")

        # model.print_size('AAA') # (16, 64, 128, 16)
        model.add_Split4thDim(2)  # now (16, 64, 128, 8, 2)

        DFTM = GT.DFT_matrix(H)
        IDFTM = GT.IDFT_matrix(H)

        DFTM_Half = GT.DFT_matrix(64)
        IDFTM_Half = GT.IDFT_matrix(64)

        # back to image space on RO
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=IDFTM)
        # YCC: also on PE
        model.add_Mult2DMCxCSharedOverFeat(64,
                                           1,
                                           Trainable=False,
                                           InitC=IDFTM_Half)

        # CC part
        ncc = myParams.myDict['CC_channels']
        # CC: model.add_conv2dC(ncc,mapsize=1) # now (16, 64, 128, ncc, 2)
        model.add_einsumC('abcd,bcdx->abcx', [64, 128, 8, ncc])
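        # Editorial note: the einsum above acts as a learned, location-dependent coil
        # compression -- at every (row, column) it mixes the 8 physical channels down to
        # ncc virtual channels with a complex matrix.  Index-wise (a=batch, b=rows,
        # c=columns, d=input channels, x=compressed channels):
        #
        #   out[a, b, c, x] = sum_d in[a, b, c, d] * w[b, c, d, x]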

        # back to k-space space on RO
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=DFTM)
        # YCC: also on PE
        model.add_Mult2DMCxCSharedOverFeat(64,
                                           1,
                                           Trainable=False,
                                           InitC=DFTM_Half)

        # now conv, from 8 to 2
        model.add_conv2dC(2, mapsize=3)  # now (16, 64, 128, 2, 2)
        # now combine the 2 with the 64
        model.add_Permute([0, 1, 3, 2, 4])  # now (16, 64, 2, 128, 2)
        model.add_Reshape([16, 128, 128, 1, 2])

        # model.add_Mult2DMCxCSharedOverFeat(H,1,NamePrefix='MapsForMat')
        model.add_Mult2DMCxCSharedOverFeat(H, 1, Trainable=False, InitC=IDFTM)
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=IDFTM)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SMASH1DFTxyC_XCC':
        print("SMASH1DFTxyC_XCC mode")

        # model.print_size('AAA') # (16, 64, 128, 16)
        model.add_Split4thDim(2)  # now (16, 64, 128, 8, 2)

        DFTM = GT.DFT_matrix(H)
        IDFTM = GT.IDFT_matrix(H)

        # back to image space on RO
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=IDFTM)

        # CC part
        ncc = myParams.myDict['CC_channels']
        # CC: model.add_conv2dC(ncc,mapsize=1) # now (16, 64, 128, ncc, 2)
        model.add_einsumC('abcd,bcdx->abcx', [64, 128, 8, ncc])

        # back to k-space space on RO
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=DFTM)

        # now conv, from 8 to 2
        model.add_conv2dC(2, mapsize=3)  # now (16, 64, 128, 2, 2)
        # now combine the 2 with the 64
        model.add_Permute([0, 1, 3, 2, 4])  # now (16, 64, 2, 128, 2)
        model.add_Reshape([16, 128, 128, 1, 2])

        # model.add_Mult2DMCxCSharedOverFeat(H,1,NamePrefix='MapsForMat')
        model.add_Mult2DMCxCSharedOverFeat(H, 1, Trainable=False, InitC=IDFTM)
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=IDFTM)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SMASH1DFTxyC_GCC':
        print("SMASH1DFTxyC_GCC mode")

        # model.print_size('AAA') # (16, 64, 128, 16)
        model.add_Split4thDim(2)  # now (16, 64, 128, 8, 2)

        DFTM = GT.DFT_matrix(H)
        IDFTM = GT.IDFT_matrix(H)

        # back to image space on RO
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=IDFTM)

        # CC part
        ncc = myParams.myDict['CC_channels']
        # CC: model.add_conv2dC(ncc,mapsize=1) # now (16, 64, 128, ncc, 2)
        model.add_einsumC('abcd,cdx->abcx', [128, 8, ncc])

        # back to k-space space on RO
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=DFTM)

        # now conv, from 8 to 2
        model.add_conv2dC(2, mapsize=3)  # now (16, 64, 128, 2, 2)
        # now combine the 2 with the 64
        model.add_Permute([0, 1, 3, 2, 4])  # now (16, 64, 2, 128, 2)
        model.add_Reshape([16, 128, 128, 1, 2])

        # model.add_Mult2DMCxCSharedOverFeat(H,1,NamePrefix='MapsForMat')
        model.add_Mult2DMCxCSharedOverFeat(H, 1, Trainable=False, InitC=IDFTM)
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=IDFTM)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SMASH1DFTxyC_SCC':
        print("SMASH1DFTxyC_SCC mode")

        # model.print_size('AAA') # (16, 64, 128, 16)
        model.add_Split4thDim(2)  # now (16, 64, 128, 8, 2)

        DFTM = GT.DFT_matrix(H)
        IDFTM = GT.IDFT_matrix(H)

        # back to image space on RO
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=IDFTM)

        # CC part
        ncc = myParams.myDict['CC_channels']
        model.add_conv2dC(ncc, mapsize=1)  # now (16, 64, 128, ncc, 2)

        # back to k-space space on RO
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=DFTM)

        # now conv, from 8 to 2
        model.add_conv2dC(2, mapsize=3)  # now (16, 64, 128, 2, 2)
        # now combine the 2 with the 64
        model.add_Permute([0, 1, 3, 2, 4])  # now (16, 64, 2, 128, 2)
        model.add_Reshape([16, 128, 128, 1, 2])

        # model.add_Mult2DMCxCSharedOverFeat(H,1,NamePrefix='MapsForMat')
        model.add_Mult2DMCxCSharedOverFeat(H, 1, Trainable=False, InitC=IDFTM)
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=IDFTM)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SMASH1DFTxyC':
        print("1DFTxyCMaps mode")

        # model.print_size('AAA') # (16, 64, 128, 16)
        model.add_Split4thDim(2)  # now (16, 64, 128, 8, 2)
        # now conv, from 8 to 2
        model.add_conv2dC(2, mapsize=3)  # now (16, 64, 128, 2, 2)
        # now combine the 2 with the 64
        model.add_Permute([0, 1, 3, 2, 4])  # now (16, 64, 2, 128, 2)
        model.add_Reshape([16, 128, 128, 1, 2])

        IDFTM = GT.IDFT_matrix(H)

        # model.add_Mult2DMCxCSharedOverFeat(H,1,NamePrefix='MapsForMat')
        model.add_Mult2DMCxCSharedOverFeat(H, 1, Trainable=False, InitC=IDFTM)
        model.add_Mult2DMCyCSharedOverFeat(W, 1, Trainable=False, InitC=IDFTM)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == '1DFTxyCMaps':
        print("1DFTxyCMaps mode")

        # model.print_size('AAA') # (16, 128, 128, 16)
        model.add_Split4thDim(2)  # now (16, 128, 128, 8, 2)
        # model.print_size('CCC')
        # model.add_Permute45()

        model.add_Mult2DMCxCSharedOverFeat(H, 1, NamePrefix='MapsForMat')
        model.add_Mult2DMCyCSharedOverFeat(W, 1)
        model.add_PixelwiseMultC(1)  #,NamePrefix='MapsForMat')
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == '2DFTC':
        print("2DFTC mode")
        model.add_5thDim()
        model.add_Permute45()

        model.add_Mult2DMCxC(H * W, 1)
        model.remove_5thDim()
        model.add_reshapeTo4D(H, W)

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == '1DFTxyC':
        print("1DFTxyC mode")
        model.add_5thDim()
        model.add_Permute45()

        model.add_Mult2DMCxC(H, 1)
        model.add_Mult2DMCyC(W, 1)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == '1DFTxC':
        print("1DFTxC mode")
        model.add_5thDim()
        model.add_Permute45()

        model.add_Mult2DMCxC(H, 1)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == '1DFTyC':
        print("1DFTyC mode")
        model.add_5thDim()
        model.add_Permute45()

        model.add_Mult2DMCyC(W, 1)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == '1DFTy':
        print("1DFTy mode")
        model.add_Mult2DMCy(W, channelsOut)

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == '1DFTx':
        print("1DFTx mode")
        model.add_Mult2DMCx(H, channelsOut)

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == '2DFT':
        print("2DFT mode")
        model.add_Mult2DMCy(W, channelsOut)
        model.add_Mult2DMCx(H, channelsOut)

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    # if myParams.myDict['NetMode'] == 'RegridTry1':
    #     print("RegridTry1 mode")
    #     model.add_PixelwiseMult(2, stddev_factor=1.0)
    #     model.add_Mult2DMCy(W,channelsOut)
    #     model.add_Mult2DMCx(H,channelsOut)

    #     new_vars  = tf.global_variables()
    #     gene_vars = list(set(new_vars) - set(old_vars))
    #     return model.get_output(), gene_vars

    # if myParams.myDict['NetMode'] == 'RegridTry1C':
    #     print("RegridTry1C mode")
    #     addBias=myParams.myDict['CmplxBias']>0
    #     if addBias:
    #         print("with bias")
    #     else:
    #         print("without bias")
    #     model.add_PixelwiseMult(2, stddev_factor=1.0)
    #     model.add_5thDim()
    #     model.add_Permute45()
    #     model.add_Mult2DMCyC(W,1,add_bias=addBias)
    #     model.add_Mult2DMCxC(H,1,add_bias=addBias)
    #     model.remove_5thDim()

    #     new_vars  = tf.global_variables()
    #     gene_vars = list(set(new_vars) - set(old_vars))
    #     return model.get_output(), gene_vars

    # if myParams.myDict['NetMode'] == 'RegridTry1C2':
    #     print("RegridTry1C2 mode")
    #     addBias=myParams.myDict['CmplxBias']>0
    #     if addBias:
    #         print("with bias")
    #     else:
    #         print("without bias")
    #     model.add_Split4thDim(2)
    #     model.add_PixelwiseMultC(1, stddev_factor=1.0)
    #     model.add_Mult2DMCyC(W,1,add_bias=addBias)
    #     model.add_Mult2DMCxC(H,1,add_bias=addBias)
    #     model.remove_5thDim()

    #     new_vars  = tf.global_variables()
    #     gene_vars = list(set(new_vars) - set(old_vars))
    #     return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'RegridTry3C2_TS_WithTSB':
        print("RegridTry3C2_TS_WithTSB mode")
        FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
        NMapCR = FullData['NMapCR']
        NMapCR = tf.constant(NMapCR)

        aDataH = myParams.myDict['aDataH']
        aDataW = myParams.myDict['aDataW']
        achannelsIn = myParams.myDict['achannelsIn']
        nTS = myParams.myDict['nTimeSegments']
        nccInData = myParams.myDict['nccInData']
        nTraj = myParams.myDict['nTraj']
        HalfDataH = np.int32(DataH / 2)
        # 133068/2 =  66534
        # model.print_shape('Start') # now 16,133068,1,1
        # model.add_Permute([0,2,3,1])
        # model.add_Split4thDim(2) # now 16,1,1,133068/2,2C
        model.add_Reshape([batch_size, 1, 1, HalfDataH, 2])
        # Now do TSB
        model.add_Permute([0, 3, 2, 1, 4])  # now 16,133068/2,1,1,2C
        model.add_Reshape([batch_size, nTraj, nccInData, 1, 2])
        model.add_Permute([0, 2, 1, 3, 4])  # now 16 13 5118 1 2
        model.add_Reshape([batch_size * nccInData, 1, nTraj, 1, 2])
        model.add_PixelwiseMultC(
            nTS, stddev_factor=1.0)  # This is TSB. After: 16*13,1,5118,nTS,2
        model.add_Reshape([batch_size, nccInData, nTraj, nTS, 2])
        model.add_Permute([2, 1, 4, 0, 3])  # now 5118 13 2 16 nTS
        model.add_Reshape([nTraj * nccInData * 2, batch_size * nTS, 1, 1])

        # model.add_Permute([1,0,2,3])
        # model.print_shape()

        feature = model.get_output()

        feature = tf.gather(feature, NMapCR, validate_indices=None, name=None)
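        # Editorial note: NMapCR holds, for every Cartesian grid cell, the flat indices
        # of its nearest trajectory samples (per coil, with real/imag offsets), so this
        # single tf.gather regrids the non-Cartesian data onto the aDataH x aDataW grid.
        # Conceptually (flat_kspace standing in for the flattened input):
        #
        #   regridded = flat_kspace[NMapCR]      # -> [aDataH, aDataW, channels, ...]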
        # feature = tf.reshape(feature, [aDataH, aDataW, achannelsIn])
        model.add_PutInOutput(feature)  # After we're 131,131,192,16*nTS

        model.add_Permute([3, 0, 1, 2, 4, 5])  # Now 16*nTS,131,131,192,1,1

        # model.add_Reshape([batch_size,nTS,aDataH,aDataW,2,96])

        # model.print_shape()
        model.add_Reshape([batch_size * nTS, aDataH, aDataW,
                           achannelsIn])  # Now 16*nTS,131,131,192

        UseSharedWightesInRelaxedFT = myParams.myDict[
            'UseSharedWightesInRelaxedFT'] > 0
        addBias = myParams.myDict['CmplxBias'] > 0
        if addBias:
            print("with bias")
        else:
            print("without bias")

        model.add_Split4thDim(
            2)  # Now we're batch_size*nTS, kH,kW, Neighbors(12)*Channels(8),2C

        model.add_PixelwiseMultC(
            1, stddev_factor=1.0)  # After we're batch_size*nTS,kH,kW,1,2C

        # AfterRegrid_ForOut = tf.identity(model.get_output(), name="AfterRegrid_ForOut")
        # model.add_PutInOutput(AfterRegrid_ForOut)

        model.add_Reshape([batch_size, nTS, aDataH, aDataW, 2])
        model.add_Permute([0, 2, 3, 1,
                           4])  # After we're batch_size,kH,kW,nTS, 2C

        # AfterRegridP_ForOut = tf.identity(model.get_output(), name="AfterRegridP_ForOut")
        # model.add_PutInOutput(AfterRegridP_ForOut)
        # Now continuing as with no TSB

        MM = GT.gDFT_matrix(np.linspace(-50, 50, aDataH), H)
        MM = np.transpose(MM, axes=[1, 0])
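        # Editorial note: GT.gDFT_matrix builds a DFT-like matrix mapping the aDataH
        # k-space coordinates in [-50, 50] to the H image pixels; it is used as a fixed
        # (non-trainable) initializer for the FTy/FTx layers below, so the "relaxed FT"
        # starts out as an exact discrete Fourier transform.  Generic form of such a
        # matrix (the exact sign/scaling convention of the helper is an assumption):
        #
        #   M[k, n] = np.exp(-2j * np.pi * k_coords[k] * n / N)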

        if UseSharedWightesInRelaxedFT:
            model.add_Mult2DMCyCSharedOverFeat(W,
                                               1,
                                               add_bias=addBias,
                                               Trainable=False,
                                               InitC=MM,
                                               NamePrefix='FTy')
            model.add_Mult2DMCxCSharedOverFeat(H,
                                               1,
                                               add_bias=addBias,
                                               Trainable=False,
                                               InitC=MM,
                                               NamePrefix='FTx')
        else:
            model.add_Mult2DMCyC(W, 1, add_bias=addBias)
            model.add_Mult2DMCxC(H, 1, add_bias=addBias)

        # AfterFT_ForOut = tf.identity(model.get_output(), name="AfterFT_ForOut")
        # model.add_PutInOutput(AfterFT_ForOut)

        # now supposedly batch_size,H,W,nTS
        model.add_PixelwiseMultC(
            1, stddev_factor=1.0, NamePrefix='TSC'
        )  # This collects the different TS into the final image.

        # AfterTSC_ForOut = tf.identity(model.get_output(), name="AfterTSC_ForOut")
        # model.add_PutInOutput(AfterTSC_ForOut)

        model.remove_5thDim()

        # EndForOut = tf.identity(model.get_output(), name="EndForOut")
        # model.add_PutInOutput(EndForOut)

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'RegridTry3C2_TS':
        print("RegridTry3C2_TS mode")

        aDataH = myParams.myDict['aDataH']
        kMax = myParams.myDict['kMax']
        aDataW = myParams.myDict['aDataW']
        # achannelsIn=myParams.myDict['achannelsIn']

        nTS = myParams.myDict['nTimeSegments']
        UseSharedWightesInRelaxedFT = myParams.myDict[
            'UseSharedWightesInRelaxedFT'] > 0
        RelaxedFT = myParams.myDict['RelaxedFT'] > 0
        addBias = myParams.myDict['CmplxBias'] > 0

        # FullData=scipy.io.loadmat(myParams.myDict['NMAP_FN'])
        # NMapCR=FullData['NMapCR']
        # NMapCR = tf.constant(NMapCR)
        nccInData = myParams.myDict['nccInData']
        # ncc=8
        ncc = myParams.myDict['nccToUse']

        nNeighbors = myParams.myDict['nNeighbors']

        achannelsIn = ncc * nNeighbors * 2
        # T=scipy.io.loadmat('/media/a/DATA/180628_AK/meas_MID244_gBP_VD11_U19_G35S155_4min_FID22439/Traj.mat')
        # Traj=T['Traj'][0:2,:]

        # BaseNUFTDataP='/media/a/DATA/13May18/Me/meas_MID409_gBP_VD11_U19_7ADCs_FID17798/'
        # BaseNUFTDataP='/media/a/DATA/11Jul18/RL/meas_MID149_gBP_VD11_U19_G35S155_FID23846/'
        BaseNUFTDataP = myParams.myDict['BaseNUFTDataP']
        NUFTData = scipy.io.loadmat(BaseNUFTDataP + 'TrajForNUFT.mat')
        Traj = NUFTData['Trajm2'][0:2, :]

        # T=scipy.io.loadmat('/media/a/DATA/13May18/Me/meas_MID409_gBP_VD11_U19_7ADCs_FID17798/TrajForNUFT.mat')
        # Traj=T['Trajm2'][0:2,:]

        NMapCR = GT.GenerateNeighborsMap(Traj, kMax, aDataH, nccInData, ncc,
                                         nNeighbors)
        NMapCR = tf.constant(NMapCR)

        # nNeighbors=myParams.myDict['nNeighbors']
        # nccInData=myParams.myDict['nccInData']

        # CurBartTraj=scipy.io.loadmat('/media/a/DATA/180628_AK/meas_MID244_gBP_VD11_U19_G35S155_4min_FID22439/Traj.mat')
        # CurBartTraj=CurBartTraj['BARTTrajMS'][0:2,2:]

        # osN=aDataH
        # nNeighbors=nNeighbors
        # NMap=np.zeros([osN,osN,nNeighbors],dtype='int32')

        # C=GT.linspaceWithHalfStep(-kMax,kMax,osN)

        # nChToUseInNN=nccInData
        # ncc=nccInData
        # nTrajAct=CurBartTraj.shape[1]

        # for i in np.arange(0,osN):
        #     for j in np.arange(0,osN):

        #         CurLoc=np.vstack([C[i], C[j]])

        #         D=CurBartTraj-CurLoc
        #         R=np.linalg.norm(D,ord=2,axis=0)/np.sqrt(2)
        #         Idx=np.argsort(R)

        #         NMap[i,j,:]=Idx[0:nNeighbors]

        # a=np.reshape(np.arange(0,nChToUseInNN)*nTrajAct,(1,1,1,nChToUseInNN))
        # NMapC=np.reshape(NMap,(NMap.shape[0],NMap.shape[1],NMap.shape[2],1))+a
        # NMapC=np.transpose(NMapC,(0,1,2,3))
        # NMapCX=np.reshape(NMapC,(osN,osN,nNeighbors*nChToUseInNN))
        # NMapCR=np.concatenate((NMapCX,NMapCX+nTrajAct*ncc),axis=2)

        # model.print_shape()

        # model.add_Reshape([16*133068])
        model.add_Permute([1, 0, 2, 3])  # now we're 133068,16,1,1
        # model.print_shape()

        feature = model.get_output()

        feature = tf.gather(feature, NMapCR, validate_indices=None,
                            name=None)  # After 131,131,192,16
        # feature = tf.reshape(feature, [aDataH, aDataW, achannelsIn])
        model.add_PutInOutput(feature)

        model.add_Permute([3, 0, 1, 2, 4, 5])  # After 16,131,131,192,1,1

        # model.print_shape()
        model.add_Reshape([batch_size, aDataH, aDataW,
                           achannelsIn])  # After 16,131,131,192

        model.add_Split4thDim(
            2)  # Now we're kH,kW, Neighbors(12)*Channels(8),2

        # model.add_PixelwiseMultC(nTS, stddev_factor=1.0) # After we're batch_size,kH,kW,nTS
        InitForRC = []
        if myParams.myDict['InitForRFN'] != 'None':
            InitForRM = scipy.io.loadmat(myParams.myDict['InitForRFN'])
            InitForRR = InitForRM['gene_GEN_L007_PixelwiseMultC_weightR_0']
            InitForRI = InitForRM['gene_GEN_L007_PixelwiseMultC_weightI_0']
            InitForRC = InitForRR + 1j * InitForRI
        model.add_PixelwiseMultC(nTS,
                                 stddev_factor=1.0,
                                 NamePrefix='',
                                 Trainable=True,
                                 InitC=InitForRC)

        MM = GT.gDFT_matrix(np.linspace(-kMax, kMax, aDataH), H)
        MM = np.transpose(MM, axes=[1, 0])

        if UseSharedWightesInRelaxedFT:
            model.add_Mult2DMCyCSharedOverFeat(W,
                                               1,
                                               add_bias=addBias,
                                               Trainable=RelaxedFT,
                                               InitC=MM)
            model.add_Mult2DMCxCSharedOverFeat(H,
                                               1,
                                               add_bias=addBias,
                                               Trainable=RelaxedFT,
                                               InitC=MM)
        else:
            model.add_Mult2DMCyC(W, 1, add_bias=addBias)
            model.add_Mult2DMCxC(H, 1, add_bias=addBias)

            # now supposedly batch_size,H,W,nTS
        # model.add_PixelwiseMultC(1, stddev_factor=1.0) # This collects the different TS into the final image.
        InitForLC = []
        if myParams.myDict['InitForLFN'] != 'None':
            InitForLM = scipy.io.loadmat(myParams.myDict['InitForLFN'])
            InitForLR = InitForLM['gene_GEN_L010_PixelwiseMultC_weightR_0']
            InitForLI = InitForLM['gene_GEN_L010_PixelwiseMultC_weightI_0']
            InitForLC = InitForLR + 1j * InitForLI
        model.add_PixelwiseMultC(
            1,
            stddev_factor=1.0,
            NamePrefix='',
            Trainable=True,
            InitC=InitForLC
        )  # This collects the different TS into the final image.

        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'RegridTry3C2_TS_MB':
        print("RegridTry3C2_TS_MB mode")

        aDataH = myParams.myDict['aDataH']
        kMax = myParams.myDict['kMax']
        aDataW = myParams.myDict['aDataW']
        # achannelsIn=myParams.myDict['achannelsIn']

        nTS = myParams.myDict['nTimeSegments']
        UseSharedWightesInRelaxedFT = myParams.myDict[
            'UseSharedWightesInRelaxedFT'] > 0
        RelaxedFT = myParams.myDict['RelaxedFT'] > 0
        addBias = myParams.myDict['CmplxBias'] > 0

        nccInData = myParams.myDict['nccInData']

        ncc = myParams.myDict['nccToUse']

        nNeighbors = myParams.myDict['nNeighbors']

        achannelsIn = ncc * nNeighbors * 2

        BaseNUFTDataP = myParams.myDict['BaseNUFTDataP']
        NUFTData = scipy.io.loadmat(BaseNUFTDataP + 'TrajForNUFT.mat')
        Traj = NUFTData['Trajm2'][0:2, :]

        NMapCR = GT.GenerateNeighborsMap(Traj, kMax, aDataH, nccInData, ncc,
                                         nNeighbors)
        NMapCR = tf.constant(NMapCR)

        model.add_Permute([1, 0, 2, 3])  # now we're 133068,16,1,1
        # model.print_shape()

        feature = model.get_output()

        feature = tf.gather(feature, NMapCR, validate_indices=None,
                            name=None)  # After 131,131,192,16
        # feature = tf.reshape(feature, [aDataH, aDataW, achannelsIn])
        model.add_PutInOutput(feature)

        model.add_Permute([3, 0, 1, 2, 4, 5])  # After 16,131,131,192,1,1

        # model.print_shape()
        model.add_Reshape([batch_size, aDataH, aDataW,
                           achannelsIn])  # After 16,131,131,192

        if addBias:
            print("with bias")
        else:
            print("without bias")

        model.add_Split4thDim(
            2)  # Now we're kH,kW, Neighbors(12)*Channels(8),2

        # model.add_PixelwiseMultC(nTS, stddev_factor=1.0) # After we're batch_size,kH,kW,nTS
        InitForRC = []
        if myParams.myDict['InitForRFN'] != 'None':
            InitForRM = scipy.io.loadmat(myParams.myDict['InitForRFN'])
            InitForRR = InitForRM['gene_GEN_L007_PixelwiseMultC_weightR_0']
            InitForRI = InitForRM['gene_GEN_L007_PixelwiseMultC_weightI_0']
            InitForRC = InitForRR + 1j * InitForRI
        model.add_PixelwiseMultC(nTS,
                                 stddev_factor=1.0,
                                 NamePrefix='',
                                 Trainable=True,
                                 InitC=InitForRC)

        MM = GT.gDFT_matrix(np.linspace(-kMax, kMax, aDataH), H)
        MM = np.transpose(MM, axes=[1, 0])

        if UseSharedWightesInRelaxedFT:
            model.add_Mult2DMCyCSharedOverFeat(W,
                                               1,
                                               add_bias=addBias,
                                               Trainable=RelaxedFT,
                                               InitC=MM)
            model.add_Mult2DMCxCSharedOverFeat(H,
                                               1,
                                               add_bias=addBias,
                                               Trainable=RelaxedFT,
                                               InitC=MM)
        else:
            model.add_Mult2DMCyC(W, 1, add_bias=addBias)
            model.add_Mult2DMCxC(H, 1, add_bias=addBias)

            # now supposedly batch_size,H,W,nTS
        # ggg: 2 here is MB
        # model.add_PixelwiseMultC(2, stddev_factor=1.0) # This collects the different TS into the final image.
        # model.print_shape('BeforeL')

        InitForLC = []
        if myParams.myDict['InitForLFN'] != 'None':
            InitForLM = scipy.io.loadmat(myParams.myDict['InitForLFN'])
            InitForLR = InitForLM['gene_GEN_L010_PixelwiseMultC_weightR_0']
            InitForLI = InitForLM['gene_GEN_L010_PixelwiseMultC_weightI_0']
            InitForLC = InitForLR + 1j * InitForLI
        model.add_PixelwiseMultC(
            2,
            stddev_factor=1.0,
            NamePrefix='',
            Trainable=True,
            InitC=InitForLC
        )  # This collects the different TS into the final image.

        # model.print_shape('AfterL')

        model.add_Permute34()
        model.add_Combine34(True)

        # model.print_shape('After Combine34')

        # model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'RegridTry1C2_TS':
        print("RegridTry1C2_TS mode")
        aDataH = myParams.myDict['aDataH']
        kMax = myParams.myDict['kMax']

        nTS = myParams.myDict['nTimeSegments']
        UseSharedWightesInRelaxedFT = myParams.myDict[
            'UseSharedWightesInRelaxedFT'] > 0
        RelaxedFT = myParams.myDict['RelaxedFT'] > 0
        addBias = myParams.myDict['CmplxBias'] > 0
        model.add_Split4thDim(2)
        # model.add_PixelwiseMultC(nTS, stddev_factor=1.0) # After we're batch_size,kH,kW,nTS
        InitForRC = []
        print("InitForRC...")
        print(myParams.myDict['InitForRFN'])
        if myParams.myDict['InitForRFN'] != 'None':
            print("InitForRC From file")
            InitForRM = scipy.io.loadmat(myParams.myDict['InitForRFN'])
            InitForRR = InitForRM['gene_GEN_L007_PixelwiseMultC_weightR_0']
            InitForRI = InitForRM['gene_GEN_L007_PixelwiseMultC_weightI_0']
            InitForRC = InitForRR + 1j * InitForRI
        print("InitForRC...")
        model.add_PixelwiseMultC(nTS,
                                 stddev_factor=1.0,
                                 NamePrefix='',
                                 Trainable=True,
                                 InitC=InitForRC)

        MM = GT.gDFT_matrix(np.linspace(-kMax, kMax, aDataH), H)
        MM = np.transpose(MM, axes=[1, 0])

        if UseSharedWightesInRelaxedFT:
            model.add_Mult2DMCyCSharedOverFeat(W,
                                               1,
                                               add_bias=addBias,
                                               Trainable=RelaxedFT,
                                               InitC=MM)
            model.add_Mult2DMCxCSharedOverFeat(H,
                                               1,
                                               add_bias=addBias,
                                               Trainable=RelaxedFT,
                                               InitC=MM)
        else:
            model.add_Mult2DMCyC(W, 1, add_bias=addBias)
            model.add_Mult2DMCxC(H, 1, add_bias=addBias)

        # now supposedly batch_size,H,W,nTS

        # model.add_PixelwiseMultC(1, stddev_factor=1.0) # This collects the different TS into the final image.
        # add_PixelwiseMultC(self, numOutChannels, stddev_factor=1.0,NamePrefix='',Trainable=True,InitC=[]):
        InitForLC = []
        if myParams.myDict['InitForLFN'] != 'None':
            InitForLM = scipy.io.loadmat(myParams.myDict['InitForLFN'])
            InitForLR = InitForLM['gene_GEN_L010_PixelwiseMultC_weightR_0']
            InitForLI = InitForLM['gene_GEN_L010_PixelwiseMultC_weightI_0']
            InitForLC = InitForLR + 1j * InitForLI
        model.add_PixelwiseMultC(
            1,
            stddev_factor=1.0,
            NamePrefix='',
            Trainable=True,
            InitC=InitForLC
        )  # This collects the different TS into the final image.

        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    # if myParams.myDict['NetMode'] == 'RegridTry1C2_TS2': # Shared features in relaxed FT
    #     print("RegridTry1C2_TS mode")
    #     addBias=myParams.myDict['CmplxBias']>0
    #     if addBias:
    #         print("with bias")
    #     else:
    #         print("without bias")
    #     nTS=7
    #     model.add_Split4thDim(2)
    #     model.add_PixelwiseMultC(nTS, stddev_factor=1.0)
    #     model.add_Mult2DMCyCSharedOverFeat(W,1,add_bias=addBias)
    #     model.add_Mult2DMCxCSharedOverFeat(H,1,add_bias=addBias)
    #     model.add_PixelwiseMultC(1, stddev_factor=1.0)
    #     model.remove_5thDim()

    #     new_vars  = tf.global_variables()
    #     gene_vars = list(set(new_vars) - set(old_vars))
    #     return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SMASHTry1':
        print("SMASHTry1 mode")
        addBias = myParams.myDict['CmplxBias'] > 0

        model.add_PixelwiseMultC(2, stddev_factor=1.0)
        model.add_Combine34()
        model.add_Mult2DMCyC(W, 1, add_bias=addBias)
        model.add_Mult2DMCxC(H, 1, add_bias=addBias)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SMASHTry1_CC':
        print("SMASHTry1_CC mode")
        addBias = myParams.myDict['CmplxBias'] > 0
        # we're [Batch, kH,kW,AllChannels*Neighbors*RI]
        model.add_Split4thDim(2)  # Now [Batch, kH,kW,AllChannels*Neighbors,RI]

        model.add_Mult2DMCxCSharedOverFeat(
            DataH, 1)  # Now [Batch, H,kW,AllChannels*Neighbors,RI]
        model.add_Split4thDim(6)  # Now [Batch, H,kW,AllChannels,Neighbors,RI]

        ncc = 4
        model.add_einsumC('abcde,dx->abcxe', [8, ncc])

        model.add_Combine45(
            squeeze=True)  # Now [Batch, H,kW,CompressedChannels*Neighbors,RI]
        model.add_Mult2DMCxCSharedOverFeat(
            DataH, 1)  # Now [Batch, kH,kW,CompressedChannels*Neighbors,RI]

        model.add_PixelwiseMultC(2, stddev_factor=1.0)
        model.add_Combine34()
        model.add_Mult2DMCyC(W, 1, add_bias=addBias)
        model.add_Mult2DMCxC(H, 1, add_bias=addBias)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SMASHTry1_GCC':
        print("SMASHTry1_GCC mode")
        addBias = myParams.myDict['CmplxBias'] > 0
        # we're [Batch, kH,kW,AllChannels*Neighbors*RI]
        model.add_Split4thDim(2)  # Now [Batch, kH,kW,AllChannels*Neighbors,RI]

        model.add_Mult2DMCxCSharedOverFeat(
            DataH, 1)  # Now [Batch, H,kW,AllChannels*Neighbors,RI]
        model.add_Split4thDim(6)  # Now [Batch, H,kW,AllChannels,Neighbors,RI]

        ncc = 4
        model.add_einsumC('abcde,bdx->abcxe', [DataH, 8, ncc])

        model.add_Combine45(
            squeeze=True)  # Now [Batch, H,kW,CompressedChannels*Neighbors,RI]
        model.add_Mult2DMCxCSharedOverFeat(
            DataH, 1)  # Now [Batch, kH,kW,CompressedChannels*Neighbors,RI]

        model.add_PixelwiseMultC(2, stddev_factor=1.0)
        model.add_Combine34()
        model.add_Mult2DMCyC(W, 1, add_bias=addBias)
        model.add_Mult2DMCxC(H, 1, add_bias=addBias)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'SMASHTry1_GCCF':
        print("SMASHTry1_GCCF mode")
        addBias = myParams.myDict['CmplxBias'] > 0
        # we're [Batch, kH,kW,AllChannels*Neighbors*RI]
        model.add_Split4thDim(2)  # Now [Batch, H,kW,AllChannels*Neighbors,RI]

        model.add_Split4thDim(6)  # Now [Batch, H,kW,AllChannels,Neighbors,RI]

        ncc = 4
        model.add_einsumC('abcde,bdx->abcxe', [DataH, 8, ncc])

        model.add_Combine45(
            squeeze=True)  # Now [Batch, H,kW,CompressedChannels*Neighbors,RI]

        DFTM = GT.DFT_matrix(DataH)
        model.add_Mult2DMCxCSharedOverFeat(
            DataH, 1, add_bias=addBias, Trainable=False,
            InitC=DFTM)  # Now [Batch, kH,kW,CompressedChannels*Neighbors,RI]

        model.add_PixelwiseMultC(2, stddev_factor=1.0)
        model.add_Combine34()
        model.add_Mult2DMCyC(W, 1, add_bias=addBias)
        model.add_Mult2DMCxC(H, 1, add_bias=addBias)
        model.remove_5thDim()

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'Conv_3Layers':
        print("Conv_3Layers mode")
        # model.print_shape()
        model.add_conv2d(64, mapsize=mapsize, stride=1, stddev_factor=2.)
        model.add_elu()
        model.add_conv2dWithName(32,
                                 name="ggg",
                                 mapsize=1,
                                 stride=1,
                                 stddev_factor=2.)
        model.add_elu()
        model.add_conv2d(channelsOut, mapsize=5, stride=1, stddev_factor=2.)

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'Unet_v1_ForB0':
        b = np.array([[64, 0, 0, 128, 128, 0, 0, 64],
                      [128, 0, 0, 256, 256, 0, 0, 128],
                      [512, 0, 0, 0, 0, 0, 0, 512]])
        model.add_UnetKsteps(b, mapsize=mapsize, stride=2, stddev_factor=1e-3)

        # OutChannels=labels.shape[3]

        model.add_conv2d(channelsOut,
                         mapsize=mapsize,
                         stride=1,
                         stddev_factor=2.)

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))

        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'Conv_v1_ForB0':
        print("Conv_v1_ForB0 mode")
        # model.print_shape()
        model.add_conv2d(64, mapsize=mapsize, stride=1, stddev_factor=2.)
        model.add_elu()
        model.add_conv2dWithName(128,
                                 name="ggg",
                                 mapsize=mapsize,
                                 stride=1,
                                 stddev_factor=2.)
        model.add_elu()
        model.add_conv2d(128, mapsize=mapsize, stride=1, stddev_factor=2.)
        model.add_elu()
        model.add_conv2d(channelsOut,
                         mapsize=mapsize,
                         stride=1,
                         stddev_factor=2.)
        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    if myParams.myDict['NetMode'] == 'Conv_v1':
        print("Conv_v1 mode")
        # model.print_shape()
        model.add_conv2d(64, mapsize=mapsize, stride=1, stddev_factor=2.)
        model.add_elu()
        model.add_conv2dWithName(128,
                                 name="ggg",
                                 mapsize=mapsize,
                                 stride=1,
                                 stddev_factor=2.)
        model.add_elu()
        model.add_conv2d(128, mapsize=mapsize, stride=1, stddev_factor=2.)
        model.add_elu()
        model.add_conv2d(1, mapsize=mapsize, stride=1, stddev_factor=2.)

    # SAE
    SAE = myParams.myDict['NetMode'] == 'SAE'
    if SAE:
        model.add_conv2d(64, mapsize=mapsize, stride=1, stddev_factor=2.)
        model.add_elu()
        model.add_conv2dWithName(128,
                                 name="AE",
                                 mapsize=mapsize,
                                 stride=1,
                                 stddev_factor=2.)

        model.add_conv2d(64, mapsize=mapsize, stride=1, stddev_factor=2.)
        model.add_elu()
        model.add_conv2d(channels, mapsize=7, stride=1, stddev_factor=2.)
        model.add_sigmoid()
        # model.add_tanh()

    # kKick:
    kKick = myParams.myDict['NetMode'] == 'kKick'
    if kKick:
        model.add_conv2d(64, mapsize=1, stride=1, stddev_factor=2.)
        model.add_elu()
        b = np.array([[64, 0, 0, 128, 128, 0, 0, 64],
                      [128, 0, 0, 256, 256, 0, 0, 128],
                      [512, 0, 0, 0, 0, 0, 0, 512]])
        model.add_UnetKsteps(b, mapsize=mapsize, stride=2, stddev_factor=1e-3)
        model.add_conv2dWithName(50,
                                 name="AE",
                                 mapsize=3,
                                 stride=1,
                                 stddev_factor=2.)
        model.add_elu()
        model.add_conv2d(channels, mapsize=1, stride=1, stddev_factor=2.)

        new_vars = tf.global_variables()
        gene_vars = list(set(new_vars) - set(old_vars))
        return model.get_output(), gene_vars

    # AUTOMAP
    # AUTOMAP_units  = [64, 64, channels]
    # AUTOMAP_mapsize  = [5, 5, 7]

    # ggg option 1: FC
    # model.add_flatten() # FC1
    # model.add_dense(num_units=H*W*2)
    # model.add_reshapeTo4D(H,W)

    TSRECON = myParams.myDict['NetMode'] == 'TSRECON'
    if TSRECON:
        # ggg option 2: FC per channel, and then dot multiplication per pixel, then conv
        ChannelsPerCoil = myParams.myDict['NumFeatPerChannel']
        NumTotalFeat = myParams.myDict['NumTotalFeat']
        model.add_Mult2DMC(H * W, ChannelsPerCoil)
        model.add_reshapeTo4D(H, W)
        model.add_PixelwiseMult(NumTotalFeat, stddev_factor=1.0)
        model.add_elu()

        #model.add_denseFromM('piMDR')
        #model.add_reshapeTo4D(FLAGS.LabelsH,FLAGS.LabelsW)
        # #model.add_tanh() # FC2

        #model.add_Unet1Step(128, mapsize=5, stride=2, num_layers=2, stddev_factor=1e-3)
        #model.add_conv2d(channels, mapsize=5, stride=1, stddev_factor=2.)

        b = np.array([[64, 0, 0, 128, 128, 0, 0, 64],
                      [128, 0, 0, 256, 256, 0, 0, 128],
                      [512, 0, 0, 0, 0, 0, 0, 512]])
        #b=np.array([[64,0,0,128,128,0,0,64],[128,0,0,256,256,0,0,128]])
        #b=np.array([[64,0,0,0,0,0,0,64]])

        model.add_UnetKsteps(b, mapsize=mapsize, stride=2, stddev_factor=1e-3)
        # model.add_conv2d(channels, mapsize=1, stride=1, stddev_factor=2.)

        # ggg: Autoencode
        model.add_conv2dWithName(50,
                                 name="AE",
                                 mapsize=3,
                                 stride=1,
                                 stddev_factor=2.)
        model.add_elu()

        # ggg: Finish
        model.add_conv2d(channels, mapsize=1, stride=1, stddev_factor=2.)

        # #model.add_flatten()
        # #model.add_dense(num_units=H*W*1)
        # model.add_reshapeTo4D(FLAGS.LabelsH,FLAGS.LabelsW)
        # #model.add_batch_norm()
        # #model.add_tanh() # TC3

        # # model.add_conv2d(AUTOMAP_units[0], mapsize=AUTOMAP_mapsize[0], stride=1, stddev_factor=2.)
        # # model.add_batch_norm()
        # #model.add_relu()

        # #model.add_conv2d(AUTOMAP_units[1], mapsize=AUTOMAP_mapsize[1], stride=1, stddev_factor=2.)
        # # model.add_batch_norm()
        # #model.add_relu()

        # #model.add_conv2d(AUTOMAP_units[2], mapsize=AUTOMAP_mapsize[2], stride=1, stddev_factor=2.)
        # # model.add_conv2d(AUTOMAP_units[2], mapsize=1, stride=1, stddev_factor=2.)
        # # model.add_relu()

        #model.add_constMatMul()
        #for ru in range(len(res_units)-1):
        #    nunits  = res_units[ru]

        #    for j in range(2):
        #        model.add_residual_block(nunits, mapsize=mapsize)

        # Spatial upscale (see http://distill.pub/2016/deconv-checkerboard/)
        # and transposed convolution
        #    model.add_upscale()

        #    model.add_batch_norm()
        #    model.add_relu()
        #    model.add_conv2d_transpose(nunits, mapsize=mapsize, stride=1, stddev_factor=1.)

        # model.add_flatten()
        # model.add_dense(num_units=H*W*4)
        # model.add_reshapeTo4D(FLAGS.LabelsH,FLAGS.LabelsW)

        # #model.add_Mult2D()
        # #model.add_Mult3DComplexRI()

    SrezOrigImagePartModel = False
    if SrezOrigImagePartModel:
        nunits = res_units[0]
        for j in range(2):
            model.add_residual_block(nunits, mapsize=mapsize)
        #model.add_upscale()
        model.add_batch_norm()
        model.add_relu()
        model.add_conv2d_transpose(nunits,
                                   mapsize=mapsize,
                                   stride=1,
                                   stddev_factor=1.)

        nunits = res_units[1]
        for j in range(2):
            model.add_residual_block(nunits, mapsize=mapsize)
        #model.add_upscale()
        model.add_batch_norm()
        model.add_relu()
        model.add_conv2d_transpose(nunits,
                                   mapsize=mapsize,
                                   stride=1,
                                   stddev_factor=1.)

        # Finalization a la "all convolutional net"
        nunits = res_units[-1]
        model.add_conv2d(nunits, mapsize=mapsize, stride=1, stddev_factor=2.)
        # Worse: model.add_batch_norm()
        model.add_relu()

        model.add_conv2d(nunits, mapsize=1, stride=1, stddev_factor=2.)
        # Worse: model.add_batch_norm()
        model.add_relu()

        # Last layer is sigmoid with no batch normalization
        model.add_conv2d(channels, mapsize=1, stride=1, stddev_factor=1.)
        model.add_sigmoid()

    new_vars = tf.global_variables()
    gene_vars = list(set(new_vars) - set(old_vars))

    # ggg = tf.identity(model.get_output(), name="ggg")

    return model.get_output(), gene_vars