Example 1
import xml.etree.ElementTree as ET
import numpy as np

def readXML_pascal(path, name_to_label, c=0, is_pytorch=False):
    # Build a dictionary from the XML annotation file at `path`, following the
    # naming conventions used by torchvision's detection models.
    root = ET.parse(path).getroot()
    info = {}
    shape = (int(root.find('imagesize/nrows').text),
             int(root.find('imagesize/ncols').text), 3)
    folder = root.find('folder').text
    name = root.find('filename').text
    info['boxes'] = []
    info['labels'] = []
    info['area'] = []
    info['image_id'] = np.array([c])
    info['parents'] = []
    for type_tag in root.findall('object'):
        info['parents'].append(type_tag.find('parts/ispartof'))
        # Note: if an object has several <polygon> elements, only the points
        # of the last one are used below.
        for pt in type_tag.findall('polygon'):
            xs = pt.findall('pt/x')
            ys = pt.findall('pt/y')
        xs = [int(x.text) for x in xs]
        ys = [int(y.text) for y in ys]
        local_box = [min(xs), min(ys), max(xs), max(ys)]
        # Skip boxes that extend beyond the image bounds
        if local_box[3] > shape[0] or local_box[2] > shape[1]:
            continue
        info['labels'].append(name_to_label[type_tag.find('name').text])
        info['boxes'].append(local_box)
        info['area'].append(
            (local_box[2] - local_box[0]) * (local_box[3] - local_box[1]))
    info['boxes'] = np.array(info['boxes'])
    info['area'] = np.array(info['area'])
    info['labels'] = np.array(info['labels'])
    info['iscrowd'] = np.zeros_like(info['area'])
    info['parents'] = np.array(info['parents'])
    return name, folder, shape, info
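A hedged usage sketch; the annotation path and the label map below are made up for illustration, only readXML_pascal itself comes from the example:

name_to_label = {'person': 1, 'dog': 2}          # hypothetical label map
name, folder, shape, info = readXML_pascal(
    'annotations/img_001.xml',                   # hypothetical annotation XML
    name_to_label, c=0)
print(name, shape, info['boxes'].shape)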
Example 2
    def __init__(self, ns=[10, 10], Ls=[1., 1.]):

        self.dim = len(ns)
        self.n_cells = np.prod(ns)
        self.cell_dims = ns
        self.lens = Ls
        self.cell_ds = np.array(ns) / np.array(Ls)

        ## The create_grid function initializes all of the matrices needed for deposition
        self._create_grid()

        self.kind = 'Field'
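For reference, the fixed line relies on NumPy's elementwise division; with the default arguments it gives the number of cells per unit length along each axis (plain NumPy behavior, not code from the original):

import numpy as np

ns, Ls = [10, 10], [1.0, 1.0]
print(np.array(ns) / np.array(Ls))  # [10. 10.] cells per unit length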
Example 3
    def __init__(self):
        # Supplies the training data (images and labels) in array form.
        # Assumed imports: os, pathlib.Path, zipfile.ZipFile, PIL.Image,
        # numpy as np, and a keras-style `image` preprocessing module.
        with ZipFile('model.zip', 'r') as zipObj:
            zipObj.extractall()

        training_data_folders = {}
        entries = os.listdir('model/')

        size = 224

        for entry in entries:
            training_data_folders[(Path("model") / entry)] = entry

        images = []
        labels = []

        for folder in training_data_folders:

            # The original repeated this block once per extension and listed
            # "*.jpg" twice, which would have ingested every JPEG two times;
            # a single loop over both patterns is equivalent.
            for pattern in ("*.png", "*.jpg"):
                for img_path in folder.glob(pattern):
                    # Keep the path and the PIL image in separate variables:
                    # the original rebound `img` to the Image object and then
                    # tried to save the image "to itself".
                    img = Image.open(img_path)
                    wpercent = size / float(img.size[0])
                    hsize = int(float(img.size[1]) * wpercent)
                    img = img.resize((size, hsize), Image.ANTIALIAS)
                    img.save(img_path)
                    img = image.load_img(img_path)
                    image_array = image.img_to_array(img)
                    images.append(image_array)
                    labels.append(training_data_folders[folder])

        x_train = np.array(images)
        y_train = np.array(labels)
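One caveat with the loop above: resizing to (size, hsize) preserves each image's aspect ratio, so mixed-proportion images yield arrays of different heights and np.array(images) becomes a ragged object array. A minimal sketch of the fixed-square alternative (the file name is hypothetical, and whether distortion is acceptable is an assumption):

from PIL import Image
import numpy as np

size = 224
img = Image.open("example.png")      # hypothetical file
img = img.resize((size, size))       # fixed square, so every array has the same shape
batch = np.array([np.asarray(img)])
print(batch.shape)                   # (1, 224, 224, channels)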
Example 4
import json

import numpy as np


def load_pose(data_json_path, joints_num=17):
    """
    Load pose data from a data JSON file.
    """
    with open(data_json_path) as f:
        data = json.load(f)
    img_num = len(data)
    # np.arrays does not exist; allocate the target array explicitly.
    joints = np.zeros((img_num, joints_num, 2))
    # The original indexed with the record dict itself; enumerate for a row index.
    for i, img in enumerate(data):
        joints[i, :, :] = img['joints']

    return joints
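For reference, a minimal input this function would accept with joints_num=2; the exact schema is an assumption inferred from the indexing, not documented in the original:

# Hypothetical contents of data.json: a list with one record per image,
# each holding a (joints_num x 2) list of [x, y] keypoints.
[
    {"joints": [[12.0, 34.0], [15.5, 30.2]]},
    {"joints": [[40.1, 22.7], [44.0, 25.3]]}
]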
Example 5
    def sample(self, sample_size):
        """ Uniformly sample (s,a,r,s) experiences from the replay dataset.

        Args:
            sample_size: number of experiences to draw

        Returns:
            A tuple of numpy arrays for the |sample_size| experiences.

                (state, action, reward, next_state)

            The zeroth dimension of each array corresponds to the experience
            index. The i_th experience is given by:

                (state[i], action[i], reward[i], next_state[i])
        """
        if sample_size >= self.valid:
            raise ValueError(
                  "Can't draw sample of size %d from replay dataset of size %d"
                  % (sample_size, self.valid))

        idx = random.sample(range(0, self.valid), sample_size)

        # We can't include head - 1 in sample because we don't know the next
        # state, so simply resample (very rare if dataset is large)
        while (self.head - 1) in idx:
            idx = random.sample(range(0, self.valid), sample_size)

        idx.sort()  # Slicing for hdf5 must be in increasing order
        next_idx = [x + 1 for x in idx]

        # next_state might wrap around end of dataset
        if next_idx[-1] == self.dset_size:
            next_idx[-1] = 0
            shape = (sample_size,)+self.state[0].shape
            next_states = np.empty(shape, dtype=np.uint8)
            next_states[0:-1] = self.state[next_idx[0:-1]]
            next_states[-1] = self.state[0]
        else:
            next_states = self.state[next_idx]

        # Use a fresh loop variable: reusing `idx` here risks clobbering the
        # sample indices, which are still needed below.
        is_non_terminal = np.array([not self.is_terminal(i)
                                    for i in next_idx], dtype=bool)

        # Return the mask computed above (the original built it and then
        # indexed an unused attribute instead).
        return (self.state[idx],
                self.action[next_idx],
                self.reward[next_idx],
                next_states,
                is_non_terminal)
Example 6
    def sample(self, sample_size):
        """ Uniformly sample (s,a,r,s) experiences from the replay dataset.

        Args:
            sample_size: number of experiences to draw

        Returns:
            A tuple of numpy arrays for the |sample_size| experiences.

                (state, action, reward, next_state)

            The zeroth dimension of each array corresponds to the experience
            index. The i_th experience is given by:

                (state[i], action[i], reward[i], next_state[i])
        """
        if sample_size >= self.valid:
            raise ValueError(
                "Can't draw sample of size %d from replay dataset of size %d" %
                (sample_size, self.valid))

        idx = random.sample(range(0, self.valid), sample_size)

        # We can't include head - 1 in sample because we don't know the next
        # state, so simply resample (very rare if dataset is large)
        while (self.head - 1) in idx:
            idx = random.sample(range(0, self.valid), sample_size)

        idx.sort()  # Slicing for hdf5 must be in increasing order
        next_idx = [x + 1 for x in idx]

        # next_state might wrap around end of dataset
        if next_idx[-1] == self.dset_size:
            next_idx[-1] = 0
            shape = (sample_size, ) + self.state[0].shape
            next_states = np.empty(shape, dtype=np.uint8)
            next_states[0:-1] = self.state[next_idx[0:-1]]
            next_states[-1] = self.state[0]
        else:
            next_states = self.state[next_idx]

        # Use a fresh loop variable so the sample indices in `idx` survive.
        is_non_terminal = np.array(
            [not self.is_terminal(i) for i in next_idx], dtype=bool)

        return (self.state[idx], self.action[next_idx], self.reward[next_idx],
                next_states, is_non_terminal)
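A hedged usage sketch; the replay object, its construction, and the batch size are assumptions, only sample() itself comes from the example:

# Hypothetical: `replay` is an instance of the replay dataset class above,
# already holding well over 32 valid experiences.
states, actions, rewards, next_states, non_terminal = replay.sample(32)
assert states.shape[0] == 32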
Example 7
    def One_step_search(self, state, V):
        """
        Compute the one-step lookahead action values q(s, a) for a state.
        -----------
        Function parameters
        --------------
        state: current state, for example grid coordinates [x, y]
        V: current value-function estimate (array)

        Returns an array with one expected return per action.
        """
        action_values = np.zeros(len(self.env.actions))
        for action in range(len(self.env.actions)):
            for probability, next_state, reward, done in self.env.array[state][action]:
                # `lambda` is a reserved word in Python, so the discount factor
                # is assumed to live in self.discount here.
                action_values[action] += probability * (reward + self.discount * V[next_state])
        # Return after scanning every action, not inside the loop.
        return action_values
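This is the standard backup step of value iteration; a minimal sketch of how it might drive a full sweep (the states iterable, the threshold, and placing this in a method of the same class are all assumptions):

    def value_iteration(self, V, theta=1e-6):
        # Hypothetical driver: repeatedly back up every state until V converges.
        while True:
            delta = 0.0
            for state in self.env.states:          # assumed iterable of states
                q = self.One_step_search(state, V)
                best = q.max()
                delta = max(delta, abs(best - V[state]))
                V[state] = best                    # greedy Bellman update
            if delta < theta:
                break
        return V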
Example 8
    def _compute_sum_last_errors(self):
        import numpy as np
        import torch
        with torch.no_grad():
            # self._memory_size is a bound method; the original passed `self`
            # twice by writing self._memory_size(self).
            X = self.memory_x[self.evaluation_index:self._memory_size()]
            if isinstance(X[0], torch.Tensor):
                X = torch.stack(X)
            else:
                X = torch.tensor(X).to(self.device)

            a = self.memory_a[self.evaluation_index:self._memory_size()]
            if isinstance(a[0], torch.Tensor):
                a = torch.stack(a)
            else:
                a = torch.tensor(a).to(self.device)

            # TODO: this loop could likely be vectorized.
            losses = []
            for ae in self.auto_encoders:
                loss = self.loss(X, ae(X).gather(1, a)).item()
                losses.append(loss)
        losses = np.array(losses)
        return losses * len(X)
Example 9
# Mixture of normal distributions in 2D.
# The distributions should be neither too far apart nor too close together.

# Performance of the methods depends on (i) the locations/variances of the
# normal distributions and (ii) the sample size.

# Write the code to generate a sample set first, then
# visualize the sample records in 2D to grasp the difficulty of the task at a glance.

# Number of normal distributions :
ndist = 3

# Sample size :
nsample = 100

# Centers of the normal distributions
import numpy as np
ctrs = np.array([[0.0, 0.0], [1.0, 0.0], [0.7, 0.4]])

# Covariance matrices: one 2x2 matrix per distribution. The entries were left
# blank in the original, so they still need to be filled in.
sigmas = np.array([[[], []], [[], []], [[], []]])
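The comments above ask for generating and visualizing a sample set; a minimal sketch under assumed parameters (the isotropic 0.05*I covariances and equal mixing weights are placeholders, not values from the original):

import matplotlib.pyplot as plt
import numpy as np

rng = np.random.default_rng(0)
sigmas = np.array([0.05 * np.eye(2)] * ndist)   # placeholder covariances
weights = np.full(ndist, 1.0 / ndist)           # equal mixing weights (assumed)

# Draw each record from a randomly chosen mixture component.
comps = rng.choice(ndist, size=nsample, p=weights)
samples = np.array([rng.multivariate_normal(ctrs[k], sigmas[k]) for k in comps])

plt.scatter(samples[:, 0], samples[:, 1], c=comps, s=12)
plt.title("Samples from the 2D Gaussian mixture")
plt.show()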
Example 10
import cv2
import numpy as np

# Capture video from the default webcam
cap = cv2.VideoCapture(0)

while True:
    # '_' is a Python convention telling the reader to ignore that value.
    _, frame = cap.read()

    # HSV is more convenient than RGB for selecting ranges of colors.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Minimum and maximum of the color values; change these to get the
    # desired results.
    color_value_1 = np.array([0, 0, 0])
    color_value_2 = np.array([255, 255, 255])

    # The mask keeps pixels between color_value_1 and color_value_2, so with
    # the full range above it is currently identical to the frame.
    mask = cv2.inRange(hsv, color_value_1, color_value_2)

    # Apply a bitwise AND between the frame and the mask.
    result = cv2.bitwise_and(frame, frame, mask=mask)

    # Averaging kernel over 10x10 pixel neighborhoods
    kernel = np.ones((10, 10), np.float32) / 100

    # Smoothing
    smooth_result = cv2.filter2D(result, -1, kernel)

    # Below is a showcase of a few of the different blurs OpenCV offers
    # (the snippet is truncated here in the original).
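The example is cut off at this point. A plausible continuation under my own assumptions, using the standard OpenCV blur calls plus the usual display and exit boilerplate, none of which appears in the original:

    # A few common OpenCV blurs, for comparison with the filter2D result above.
    blur = cv2.blur(result, (10, 10))
    gaussian = cv2.GaussianBlur(result, (15, 15), 0)
    median = cv2.medianBlur(result, 15)

    cv2.imshow('smoothed', smooth_result)
    cv2.imshow('gaussian', gaussian)

    # Press Esc to quit
    if cv2.waitKey(1) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()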
Example 11
    def classify(self):

        sorted_cards = np.sort(self.cards, axis=None)

        flush = False
        straight = False

        ranks = {}
        suits = {}

        straight_count = 1
        straight_cards = np.array([])

        ace_present = False

        highest_rank = 0
        high_card = None
        
        previous_card = None

        for i in range(len(sorted_cards)):

            card = sorted_cards[i]
            rank = card.get_rank()
            suit = card.get_suit()

            # The original never set ace_present; assuming get_rank() returns 1
            # for aces (as the ace-high checks below imply), record it here.
            if rank == 1:
                ace_present = True

            if rank not in ranks:
                ranks[rank] = np.array([card])
            else:
                ranks[rank] = np.append(ranks[rank], card)
            
            if suit not in suits:
                suits[suit] = np.array([card])
            else:
                suits[suit] = np.append(suits[suit], card)
            
            if previous_card is None:
                previous_card = card

            else:
                
                if highest_rank < rank:
                    highest_rank = rank
                    high_card = card

                previous_rank = previous_card.get_rank()
                
                # Accumulates points towards a straight classification
                if previous_rank == rank - 1:
                    straight_count += 1
                # Resets points
                else:
                    straight_count = 1
                
                # Defines straight
                if straight_count >= 5:
                    straight = True
                    straight_cards = np.array([sorted_cards[i-4], sorted_cards[i-3], sorted_cards[i-2], sorted_cards[i-1], sorted_cards[i]])

                # Handles ace high straights
                elif straight_count == 4 and rank == 13 and ace_present:
                    straight = True
                    straight_cards = np.array([sorted_cards[i-3], sorted_cards[i-2], sorted_cards[i-1], sorted_cards[i], sorted_cards[0]])

        if straight:
            self.assign_classification(handClassification=4, cards=straight_cards)

        # Assigns junk card classification
        if ace_present:
            high_card = sorted_cards[0]
        self.assign_classification(handClassification=0, cards=high_card)

        # Checking for flush classification
        for suit, cards in suits.items():
            if len(cards) == 5:
                flush = True

            if flush:
                self.assign_classification(handClassification=5, cards=cards)

                # Checking for special straight classifications (straight flush, royal flush)
                if straight:
                    # `cards == straight_cards` is elementwise and ambiguous in
                    # an if-statement; compare the arrays as wholes instead.
                    if np.array_equal(cards, straight_cards):
                        self.assign_classification(handClassification=8, cards=cards)

                        if cards[-1].rank == 1:
                            self.assign_classification(handClassification=9, cards=cards)
        
        # Checking for pair classifications (pair, two pair, 3oak, 4oak, full house)
        pairs = np.array([])
        for rank, cards in ranks.items():

            appearances = len(cards)
            # print(f"rank: {rank}, appearances: {appearances}, cards: {cards}")
            # Check Pair
            if appearances == 2:

                # Check for two pair
                if len(pairs) == 2:
                    pairs = np.append(pairs, cards)
                    self.assign_classification(handClassification=2, cards=pairs)
                
                if len(pairs) == 3:
                    pairs = np.append(pairs, cards)
                    self.assign_classification(handClassification=6, cards=pairs)

                pairs = cards
                self.assign_classification(handClassification=1, cards=cards)

            # Check 3 of a kind
            if appearances == 3:
                # Check for full house
                if len(pairs) == 2:
                    pairs = np.append(pairs, cards)
                    self.assign_classification(handClassification=6, cards=pairs)

                pairs = cards
                self.assign_classification(handClassification=3, cards=cards)

            # Checks 4 of a kind
            if appearances == 4:
                self.assign_classification(handClassification=7, cards=cards)
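A quick illustration of the array-comparison fix above; this is plain NumPy behavior, not code from the example:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([1, 2, 3])
# `if a == b:` would raise "The truth value of an array ... is ambiguous"
print(np.array_equal(a, b))  # True: compares the arrays as wholes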
Example 12
test_end = dt.datetime.now()
test_data = web.DataReader(company, 'yahoo', test_start, test_end)
actual_prices = test_data['Close'].values
total_dataset = pd.concat((data['Close'], test_data['Close']))

model_inputs = total_dataset[len(total_dataset) - len(test_data) -
                             prediction_days:].values
model_inputs = model_inputs.reshape(-1, 1)
model_inputs = scaler.transform(model_inputs)

# Make Predictions on Test Data
x_test = []

for x in range(prediction_days, len(model_inputs)):
    x_test.append(model_inputs[x - prediction_days:x, 0])

x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

predicted_prices = model.predict(x_test)
predicted_prices = scaler.inverse_transform(predicted_prices)

# Plot the Test Predictions
plt.plot(actual_prices, color="black", label=f"Actual {company} price")
plt.plot(predicted_prices, color="green", label=f"Predicted {company} price")
plt.title(f"{company} share price")
plt.xlabel('Time')
plt.ylabel(f'{company} Share price')
plt.legend()
plt.show()
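To make the windowing and reshape above concrete, a small self-contained sketch with dummy numbers (prediction_days=3 is an arbitrary illustration value):

import numpy as np

prediction_days = 3
model_inputs = np.arange(8, dtype=float).reshape(-1, 1)

x_test = []
for x in range(prediction_days, len(model_inputs)):
    x_test.append(model_inputs[x - prediction_days:x, 0])

x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
print(x_test.shape)  # (5, 3, 1): samples x timesteps x features, as an LSTM expects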
Example 13
np.mean(CRSP_Unconstrained2.fit_err)
np.std(CRSP_Unconstrained2.fit_err)


np.mean(CRSP_Unconstrained2.fit_err['2011-01-01':])
np.std(CRSP_Unconstrained2.fit_err['2011-01-01':])


np.mean(CRSP_EW.price_err)
np.std(CRSP_EW.price_err)
np.mean(CRSP_EW.price_err['2011-01-01':])
np.std(CRSP_EW.price_err['2011-01-01':])


number_bonds = np.array([len(r['cusips']) for r in CRSP_Unconstrained_Results_list])
# number_bonds is already an ndarray, so no second conversion is needed
number_bonds.mean()
np.argmin(number_bonds)
CRSP_Unconstrained2_Results_list[0].keys()

len(CRSP_by_cusip[CRSP_by_cusip.ITYPE < 5])
len(CRSP_by_cusip[CRSP_by_cusip.ITYPE == 4])
len(CRSP_by_cusip[CRSP_by_cusip.ITYPE == 2])
len(CRSP_by_cusip[CRSP_by_cusip.ITYPE == 1])


unconstrained_beta = np.array([r['nss_params'] for r in CRSP_Unconstrained2_Results_list])
ridge_beta = np.array([r['nss_params'] for r in CRSP_ZeroRidge_Results_list])

unconstrained_beta = np.array([r['nss_params'] for r in CRSP_EW_Results_list])
ridge_beta = np.array([r['nss_params'] for r in CRSP_EWZeroRidge_Results_list])
Example 14
import numpy as np


def predict(clf, data):
    data = np.array(data)
    # predict returns an array; take its single element before casting
    return int(clf.predict(data.reshape(1, -1))[0])
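A hedged usage sketch; the classifier and the toy data are made up, only predict() comes from the example:

from sklearn.linear_model import LogisticRegression
import numpy as np

X = np.array([[0.0, 0.0], [1.0, 1.0]])
y = np.array([0, 1])
clf = LogisticRegression().fit(X, y)

print(predict(clf, [0.9, 0.8]))  # expected to print 1 for a point near class 1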