Code Example #1
File: vis.py  Project: amritsaha607/BTP
def plotArea(r,
             eps,
             lambd,
             figs=['sca', 'abs', 'tot'],
             overlay=False,
             label_mode=['r1', 'r2'],
             size='auto',
             debug=True,
             title='auto'):
    """
        Function to plot area w.r.t. varying wavelength for different parameters
        Args:
            r : (dict) radius data
            eps : (dict) epsilon data
            lamd : (float array) varying wavelength data
            figs : (list) figures to show
                    'sca': scattering area
                    'abs': absorption area
                    'tot': total area
            overlay : (bool) To overlap the plots for different parameters or not
            label_mode : (list) label with r1 or r2 or both values
            size : figure size ('auto' for auto adjustments)
            debug : set True while plotting in notebook
                    set False if you wanna get the figure object
    """

    n_rows = 1 if overlay else len(figs)
    n_cols = len(figs) if overlay else len(r['r2'])

    # Generate size for 'auto' size mode
    if size == 'auto':
        if overlay:
            size = (n_cols * 5, 4)
        else:
            size = (n_cols * 5, n_rows * 4)

    fig, ax = plt.subplots(n_rows, n_cols, figsize=size)

    # Make plots
    for col, (r1, r2) in enumerate(zip(r['r1'], r['r2'])):
        r_cp = r.copy()
        r_cp['r1'] = r1
        r_cp['r2'] = r2
        area_sca, area_abs = getArea(r_cp, eps, lambd)

        for row in range(len(figs)):
            if overlay:
                ax_ = ax[row] if n_cols > 1 else ax
            else:
                ax_ = ax[row][col] if n_rows > 1 else ax[col]

            label_ = []
            if 'r1' in label_mode:
                label_.append("r1: {}nm".format(int(r1 * 1e9)))
            if 'r2' in label_mode:
                label_.append("r2: {}nm".format(int(r2 * 1e9)))
            label_ = ', '.join(label_)

            if figs[row] == 'sca':
                ax_.plot(lambd * 1e9, area_sca, label=label_)
            elif figs[row] == 'abs':
                ax_.plot(lambd * 1e9, area_abs, label=label_)
            elif figs[row] == 'tot':
                ax_.plot(lambd * 1e9, area_abs + area_sca, label=label_)
            else:
                raise ValueError("Unknown figure format '{}' found".format(
                    figs[row]))

            ax_.set_xlabel("Wavelength (nm)")
            ax_.set_ylabel("Cross Section (m2)")
            ax_.legend()

            title_ = title
            if overlay and title == 'auto':
                title_ = 'Total Cross Section'
                if figs[row] == 'abs':
                    title_ = 'Absorption Cross Section'
                elif figs[row] == 'sca':
                    title_ = 'Scattering Cross Section'

            ax_.set_title(title_)

    if debug:
        plt.show()

    else:
        plt.close()
        return fig
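
A minimal usage sketch for plotArea, assuming wavelengths and radii in metres, placeholder permittivity arrays, and that getArea is importable from the same project; the output filename is illustrative.

import numpy as np

lambd = np.linspace(400e-9, 900e-9, 500)             # wavelength grid (m)
eps = {
    'e1': np.full(lambd.shape, -10.0 + 1.0j),        # placeholder permittivity array
    'e2': np.full(lambd.shape, 2.25 + 0.0j),         # placeholder permittivity array
    'e3': 1.0 + 0.0j,                                 # surrounding medium (complex scalar)
}
r = {'r1': [10e-9, 15e-9], 'r2': [20e-9, 30e-9]}      # two (r1, r2) parameter pairs

# Overlay scattering and absorption curves for both parameter pairs
plotArea(r, eps, lambd, figs=['sca', 'abs'], overlay=True)

# Get the figure object back instead of showing it
fig = plotArea(r, eps, lambd, figs=['tot'], debug=False)
fig.savefig('cross_sections.png')
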
Code Example #2
File: predict.py  Project: amritsaha607/BTP
            data_factors=data_factors,
            ret_mode='abs',
        )

    for key in r_e1_data_x:
        r_e1_data_x[key] = torch.tensor(r_e1_data_x[key]).type(
            torch.FloatTensor)

elif isMode(mode, 'r'):
    x = []
    for [r1, r2] in r_data:
        r = {
            'r1': r1,
            'r2': r2,
        }
        A_sca, A_abs = getArea(r, eps, lambd)
        A_sca, A_abs = A_sca * data_factors['A'], A_abs * data_factors['A']
        x.append(A_abs)

    x = torch.tensor(np.array(x, dtype=np.float32))

# Checkpoint directory depends on the mode; each entry inside it is a version
if isMode(mode, 'e1_e2'):
    CHECKPOINT_DIR = f'checkpoints/domain_{domain}/{mode}/E1E2Data/{model_id}'
elif isMode(mode, 'e1'):
    CHECKPOINT_DIR = f'checkpoints/domain_{domain}/{mode}/E1Data/{model_id}'
elif isMode(mode, 'r'):
    CHECKPOINT_DIR = f'checkpoints/domain_{domain}/{mode}/MassData/{model_id}'

for version in sorted(os.listdir(CHECKPOINT_DIR)):
    print(version)
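
The loop above only prints the version names; the rest of predict.py is not shown. A hypothetical continuation, assuming each entry under CHECKPOINT_DIR is a checkpoint file holding the model's state_dict and that the model object is built elsewhere in predict.py:

# Hypothetical continuation (assumption: one state_dict file per version)
for version in sorted(os.listdir(CHECKPOINT_DIR)):
    ckpt_path = os.path.join(CHECKPOINT_DIR, version)
    state_dict = torch.load(ckpt_path, map_location='cpu')
    model.load_state_dict(state_dict)   # model is assumed to be constructed earlier
    model.eval()
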
Code Example #3
File: utils.py  Project: amritsaha607/BTP
def getAreaE1E2Class(r,
                     e1_cls,
                     e2_cls,
                     lambd=None,
                     e1=None,
                     e2=None,
                     eps=None,
                     data_file='dataGeneration/csv/Au_interpolated_1.csv',
                     data_factors=None,
                     ret_mode='all'):
    """
        Returns cross sections from given r data, e1_cls & e2_cls
        Args:
            r : (raw data, given in nanometers)
                (dict) : returns single sample area
                (list of list) : returns multiple samples area as a list of np array
            e1_cls : string [material name of e1 class]
            e2_cls : string [material name of e2 class]
            lambd : wavelength array
            e1 : e1 array
            e2 : e2 array
            eps : eps dict [e1 => array, e2 => array, e3 => complex]
            data_file : CSV file to read wavelength/permittivity data from
            data_factors : factors to apply to the cross-section data
            ret_mode :
                "all" : returns [A_sca, A_abs]
                "abs" : returns A_abs
                "sca" : returns A_sca
    """

    if data_factors is None:
        data_factors = defaultdict(lambda: 1)

    multi = False
    if isinstance(r, list):
        r = np.array(r)
        r = {
            'r1': r[:, 0],
            'r2': r[:, 1],
        }
        multi = True

    if not eps:

        if lambd is None:
            content = pd.read_csv(data_file)
            lambd = 1e-9 * content['wl'].values
            e2 = content['er'].values + 1j * content['ei'].values

        if e1 is None or e2 is None:
            from data.utils import PermittivityCalculator
            pc = PermittivityCalculator()

        if e1 is None:
            e1 = np.array([
                pc.getEps(wl_, element=e1_cls, mode="complex")
                for wl_ in lambd * 1e6
            ])

        if e2 is None:
            e2 = np.array([
                pc.getEpsArr(wl_, element=e2_cls, mode="complex")
                for wl_ in lambd * 1e6
            ])

        eps = {
            'e1': e1,
            'e2': e2,
            'e3': 1.0 + 1j * 0.0,
        }

    A_sca, A_abs = getArea(r, eps, lambd)
    A_sca, A_abs = A_sca * data_factors['A'], A_abs * data_factors['A']

    if multi:
        A_sca, A_abs = A_sca.T, A_abs.T

    if ret_mode == 'abs':
        return A_abs
    elif ret_mode == 'sca':
        return A_sca
    elif ret_mode == 'all':
        if multi:
            return [[A_sca_, A_abs_] for (A_sca_, A_abs_) in zip(A_sca, A_abs)]
        else:
            return [A_sca, A_abs]
    else:
        raise NameError("Unknown ret_mode found - {}".format(ret_mode))
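
A usage sketch for getAreaE1E2Class. The material names are illustrative, and the radius values follow the nanometre convention stated in the docstring above; with eps and lambd omitted, the wavelength grid and permittivities come from the default CSV and the project's PermittivityCalculator.

# Single sample: raw radii as described in the docstring (nanometres)
r_single = {'r1': 10, 'r2': 20}
A_sca, A_abs = getAreaE1E2Class(r_single, e1_cls='Ag', e2_cls='Au')

# Batch form: a list of [r1, r2] pairs, absorption cross sections only
r_batch = [[10, 20], [15, 30]]
A_abs_batch = getAreaE1E2Class(r_batch, e1_cls='Ag', e2_cls='Au', ret_mode='abs')
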
Code Example #4
File: utils.py  Project: amritsaha607/BTP
def getAreaE1Class(r,
                   e1_cls,
                   lambd=None,
                   e1=None,
                   eps=None,
                   data_file='dataGeneration/csv/Au_interpolated_1.csv',
                   data_factors=None,
                   ret_mode='all'):
    """
        Returns cross sections from given r data & e1_cls
        Args:
            r : 
                (dict) : returns single sample area
                (list of list) : returns multiple samples area as a list of np array
            e1_cls : string [material name of e1 class]
            lambd : wavelength array
            e1 : e1 array
            eps : eps dict [e1 => array, e2 => array, e3 => complex]
            data_file : CSV file to read wavelength/permittivity data from
            data_factors : factors to apply to the cross-section data
            ret_mode :
                "all" : returns [A_sca, A_abs]
                "abs" : returns A_abs
                "sca" : returns A_sca
    """

    if isinstance(r, list):
        x = []
        n_r = len(r)

        # If eps is not supplied, compute it once here
        # so that it is not recomputed on every loop iteration
        if not eps:
            e3 = 1.00 + 1j * 0.0

            if lambd is None:
                content = pd.read_csv(data_file)
                lambd = 1e-9 * content['wl'].values
                e2 = content['er'].values + 1j * content['ei'].values

            if e1 is None:
                from data.utils import PermittivityCalculator
                pc = PermittivityCalculator()
                e1 = np.array([
                    pc.getEps(wl_, element=e1_cls, mode="complex")
                    for wl_ in lambd * 1e6
                ])

            eps = {
                'e1': e1,
                'e2': e2,
                'e3': e3,
            }

        for i in range(n_r):
            tr = {
                'r1': r[i][0],
                'r2': r[i][1],
            }
            A_sca, A_abs = getAreaE1Class(tr,
                                          e1_cls,
                                          lambd=lambd,
                                          eps=eps,
                                          data_factors=data_factors)

            if ret_mode == 'abs':
                x.append(A_abs)
            elif ret_mode == 'sca':
                x.append(A_sca)
            else:
                x.append([A_sca, A_abs])

        return x

    if data_factors is None:
        data_factors = defaultdict(lambda: 1)

    if not eps:

        if lambd is None:
            content = pd.read_csv(data_file)
            lambd = 1e-9 * content['wl'].values
            e2 = content['er'].values + 1j * content['ei'].values

        if e1 is None:
            from data.utils import PermittivityCalculator
            pc = PermittivityCalculator()
            e1 = np.array([
                pc.getEps(wl_, element=e1_cls, mode="complex")
                for wl_ in lambd * 1e6
            ])

        eps = {
            'e1': e1,
            'e2': e2,
            'e3': 1.0 + 1j * 0.0,
        }

    A_sca, A_abs = getArea(r, eps, lambd)
    A_sca, A_abs = A_sca * data_factors['A'], A_abs * data_factors['A']

    return A_sca, A_abs
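
getAreaE1Class handles a single r dict directly and recurses over a list of [r1, r2] pairs, computing eps once up front so the permittivity lookup is not repeated per sample. A usage sketch, with an illustrative material name and radii in metres, matching how plotArea and validate pass radii to getArea:

# Single sample
A_sca, A_abs = getAreaE1Class({'r1': 10e-9, 'r2': 20e-9}, e1_cls='Ag')

# Batch form: absorption cross sections for several [r1, r2] pairs
A_abs_list = getAreaE1Class([[10e-9, 20e-9], [15e-9, 30e-9]],
                            e1_cls='Ag', ret_mode='abs')
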
Code Example #5
File: train.py  Project: amritsaha607/BTP
def validate(epoch, loader, metrics=[],
             verbose=1, topups=['loss_split_re']):
    """
        epoch : Epoch no
        loader : Validation dataloader
        metrics : metrics to log
    """

    if isMode(mode, 'e1_e2_e3'):
        e1e2e3_losses = defaultdict(float) # key => "<e1_cls>,<e2_cls>,<e3_cls>", value => loss
        e1e2e3_loss_counts = defaultdict(float) # key => "<e1_cls>,<e2_cls>,<e3_cls>"
    elif isMode(mode, 'e1_e2'):
        e1e2_losses = defaultdict(float) # key => "<e1_cls>,<e2_cls>", value => loss
        e1e2_loss_counts = defaultdict(float) # key => "<e1_cls>,<e2_cls>"
    elif isMode(mode, 'e1'):
        e1_losses = defaultdict(float)
        e1_loss_counts = defaultdict(float)

    n = len(loader)
    tot_loss, loss_count = 0.0, 0
    if 'loss_split_re' in topups:
        tot_loss_split = None

    model.eval()
    for batch_idx, (x, y) in enumerate(loader):

        # y = getLabel(y, mode=mode)
        
        # For domain 2, separate eps first
        if domain == 2:
            x, eps = x

        # For e1, e2 modes, break x into parts
        if isMode(mode, 'e1_e2_e3'):
            x, x_e1, x_e2, x_e3 = x
        elif isMode(mode, 'e1_e2'):
            x, x_e1, x_e2 = x
        elif isMode(mode, 'e1'):
            x, x_e1 = x

        if torch.cuda.is_available():
            x = x.cuda()
            y = y.cuda()
            if domain == 2:
                eps = {key: val.cuda() for key, val in eps.items()}

        # y is needed only in domain 0 & 1
        if domain != 2:
            y = transform_domain(y, domain=domain)

        if isMode(mode, 'e1_e2_e3'):
            y_pred = model(x, x_e1, x_e2, x_e3)
        elif isMode(mode, 'e1_e2'):
            y_pred = model(x, x_e1, x_e2)
        elif isMode(mode, 'e1'):
            y_pred = model(x, x_e1)
        else:
            y_pred = model(x)

        # For domain 2, reconstruct the cross section from the predicted radii and add peak-based loss terms
        if domain == 2:

            # Convert to SI units
            r = {
                'r1': y_pred[:, 0] / data_factors['r'],
                'r2': y_pred[:, 1] / data_factors['r'],
            }
            x_pred = getArea(r, eps, val_set.lambd, ret=input_key).T * data_factors['A']

            # Extract peak information (data factors are already applied to the cross sections)
            lambd_max_pred, A_max_pred = getPeakInfo(x_pred, val_set.lambd)
            lambd_max, A_max = getPeakInfo(x, val_set.lambd)

            # Apply data factors to lambd_max and lambd_max_pred
            lambd_max = lambd_max * data_factors['lambd']
            lambd_max_pred = lambd_max_pred * data_factors['lambd']

            lambd_max, lambd_max_pred = lambd_max.unsqueeze(dim=1), lambd_max_pred.unsqueeze(dim=1)
            A_max, A_max_pred = A_max.unsqueeze(dim=1), A_max_pred.unsqueeze(dim=1)

            loss = criterion(y_pred, y,
                             mode=loss_mode,
                             run='val',
                             weights=1)
            loss += criterion(lambd_max_pred, lambd_max,
                              mode=loss_mode,
                              run='val',
                              weights=1)
            loss += criterion(A_max_pred, A_max,
                              mode='manhattan',
                              run='val',
                              weights=1)

        else:
            loss = criterion(y_pred, y,
                             mode=loss_mode,
                             run='val',
                             weights=loss_weights)

        if 'loss_split_re' in topups:
            loss_split = criterion(
                y_pred, y, 
                mode=loss_split_mode, run='val',
                weights=loss_weights
            )
            tot_loss_split = dictAdd([tot_loss_split, loss_split]) if tot_loss_split else loss_split

        if not math.isnan(loss.item()):
            tot_loss += loss.item()
            loss_count += 1

            if isMode(mode, 'e1_e2_e3'):
                e1e2e3_losses[f"val_loss_{x_e1},{x_e2},{x_e3}"] += loss.item()
                e1e2e3_loss_counts[f"val_loss_{x_e1},{x_e2},{x_e3}"] += 1
            elif isMode(mode, 'e1_e2'):
                e1e2_losses[f"val_loss_{x_e1},{x_e2}"] += loss.item()
                e1e2_loss_counts[f"val_loss_{x_e1},{x_e2}"] += 1
            elif isMode(mode, 'e1'):
                e1_losses[f"val_loss_{x_e1}"] += loss.item()
                e1_loss_counts[f"val_loss_{x_e1}"] += 1

        if verbose:
            n_arrow = 50*(batch_idx+1)//n
            progress = "Validation - [{}>{}] ({}/{}) loss : {:.4f}, avg_loss : {:.4f}".format(
                "="*n_arrow, "-"*(50-n_arrow), (batch_idx+1), n, loss.item(), tot_loss/loss_count
            )
            print(progress, end='\r')

    print()
    logg = {
        'val_loss': tot_loss/loss_count,
    }

    # Classwise loss of different materials with different e1, e2
    if isMode(mode, 'e1_e2_e3'):
        for key in e1e2e3_losses:
            e1e2e3_losses[key] /= e1e2e3_loss_counts[key]
        logg.update(e1e2e3_losses)
    elif isMode(mode, 'e1_e2'):
        for key in e1e2_losses:
            e1e2_losses[key] /= e1e2_loss_counts[key]
        logg.update(e1e2_losses)
    elif isMode(mode, 'e1'):
        for key in e1_losses:
            e1_losses[key] /= e1_loss_counts[key]
        logg.update(e1_losses)

    if 'loss_split_re' in topups:
        for key in tot_loss_split:
            tot_loss_split[key] /= loss_count
        logg.update(tot_loss_split)

    return logg
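
validate relies on module-level state in train.py (model, criterion, mode, domain, data_factors, loss settings, val_set), so it is called from the training loop rather than standalone. A hypothetical per-epoch call; n_epochs, train_loader, val_loader and train_one_epoch are illustrative names, and torch.no_grad() is added here since validate itself only switches the model to eval mode:

# Hypothetical call from the training loop in train.py
for epoch in range(n_epochs):
    train_one_epoch(epoch, train_loader)      # training step, not shown here
    with torch.no_grad():
        logg = validate(epoch, val_loader)
    print(f"epoch {epoch}: val_loss = {logg['val_loss']:.4f}")
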