Skip to main content
import numpy as np class LinearRegressor(object): def __init__(self, num_features): self.num_features = num_features self.w = np.random.randn(num_features, 1).astype(np.float32) self.b = np.array(0.0).astype(np.float32) def forward(self, x): return np.dot(x, self.w) + self.b @staticmethod def loss(y_pred, y_true): l = np.average(np.power(y_pred - y_true, 2)) / 2 return l def calculate_gradients(self, x, y_pred, y_true): self.dl_dw = np.dot(x.T, y_pred - y_true) / len(x) self.dl_db = np.mean(y_pred - y_true) def optimize(self, step_size): self.w -= step_size*self.dl_dw self.b -= step_size*self.dl_db def train(self, x, y, step_size=1.0): y_pred = self.forward(x) l = self.loss(y_pred=y_pred, y_true=y) self.calculate_gradients(x=x, y_pred=y_pred, y_true=y) self.optimize(step_size=step_size) return l def evaluate(self, x, y): return self.loss(self.forward(x), y_true)   check_reg = LinearRegressor(num_features=1) x = np.array(list(range(1000))).reshape(-1, 1) y = x losses = [] for iteration in range(100): loss = check_reg.train(x=x,y=y, step_size=0.001) losses.append(loss) if iteration % 1 == 0: print("Iteration: {}".format(iteration)) print(loss) 
class LinearRegressor(object): def __init__(self, num_features): self.num_features = num_features self.w = np.random.randn(num_features, 1).astype(np.float32) self.b = np.array(0.0).astype(np.float32) def forward(self, x): return np.dot(x, self.w) + self.b @staticmethod def loss(y_pred, y_true): l = np.average(np.power(y_pred - y_true, 2)) / 2 return l def calculate_gradients(self, x, y_pred, y_true): self.dl_dw = np.dot(x.T, y_pred - y_true) / len(x) self.dl_db = np.mean(y_pred - y_true) def optimize(self, step_size): self.w -= step_size*self.dl_dw self.b -= step_size*self.dl_db def train(self, x, y, step_size=1.0): y_pred = self.forward(x) l = self.loss(y_pred=y_pred, y_true=y) self.calculate_gradients(x=x, y_pred=y_pred, y_true=y) self.optimize(step_size=step_size) return l def evaluate(self, x, y): return self.loss(self.forward(x), y_true) x = np.array(list(range(1000))).reshape(-1, 1) y = x losses = [] for iteration in range(100): loss = check_reg.train(x=x,y=y, step_size=0.001) losses.append(loss) if iteration % 1 == 0: print("Iteration: {}".format(iteration)) print(loss) 
import numpy as np class LinearRegressor(object): def __init__(self, num_features): self.num_features = num_features self.w = np.random.randn(num_features, 1).astype(np.float32) self.b = np.array(0.0).astype(np.float32) def forward(self, x): return np.dot(x, self.w) + self.b @staticmethod def loss(y_pred, y_true): l = np.average(np.power(y_pred - y_true, 2)) / 2 return l def calculate_gradients(self, x, y_pred, y_true): self.dl_dw = np.dot(x.T, y_pred - y_true) / len(x) self.dl_db = np.mean(y_pred - y_true) def optimize(self, step_size): self.w -= step_size*self.dl_dw self.b -= step_size*self.dl_db def train(self, x, y, step_size=1.0): y_pred = self.forward(x) l = self.loss(y_pred=y_pred, y_true=y) self.calculate_gradients(x=x, y_pred=y_pred, y_true=y) self.optimize(step_size=step_size) return l def evaluate(self, x, y): return self.loss(self.forward(x), y_true)   check_reg = LinearRegressor(num_features=1) x = np.array(list(range(1000))).reshape(-1, 1) y = x losses = [] for iteration in range(100): loss = check_reg.train(x=x,y=y, step_size=0.001) losses.append(loss) if iteration % 1 == 0: print("Iteration: {}".format(iteration)) print(loss) 
Source Link

Problem in the linear regression implementation

I am new to Machine learning and I was trying to implement vectorized linear regression from scratch using numpy. I tried testing out the implementation using y=x. But my loss is increasing and I am unable to understand why. It will be great if someone could point out why is this happening. Thanks in advance!

class LinearRegressor(object): def __init__(self, num_features): self.num_features = num_features self.w = np.random.randn(num_features, 1).astype(np.float32) self.b = np.array(0.0).astype(np.float32) def forward(self, x): return np.dot(x, self.w) + self.b @staticmethod def loss(y_pred, y_true): l = np.average(np.power(y_pred - y_true, 2)) / 2 return l def calculate_gradients(self, x, y_pred, y_true): self.dl_dw = np.dot(x.T, y_pred - y_true) / len(x) self.dl_db = np.mean(y_pred - y_true) def optimize(self, step_size): self.w -= step_size*self.dl_dw self.b -= step_size*self.dl_db def train(self, x, y, step_size=1.0): y_pred = self.forward(x) l = self.loss(y_pred=y_pred, y_true=y) self.calculate_gradients(x=x, y_pred=y_pred, y_true=y) self.optimize(step_size=step_size) return l def evaluate(self, x, y): return self.loss(self.forward(x), y_true) x = np.array(list(range(1000))).reshape(-1, 1) y = x losses = [] for iteration in range(100): loss = check_reg.train(x=x,y=y, step_size=0.001) losses.append(loss) if iteration % 1 == 0: print("Iteration: {}".format(iteration)) print(loss) 

Output

Iteration: 0 612601.7859402705 Iteration: 1 67456013215.98818 Iteration: 2 7427849474110884.0 Iteration: 3 8.179099502901393e+20 Iteration: 4 9.006330707513148e+25 Iteration: 5 9.917228672922966e+30 Iteration: 6 1.0920254505132042e+36 Iteration: 7 1.2024725981084638e+41 Iteration: 8 1.324090295064888e+46 Iteration: 9 1.4580083421516024e+51 Iteration: 10 1.60547085025467e+56 Iteration: 11 1.7678478362285333e+61 Iteration: 12 1.946647415292399e+66 Iteration: 13 2.1435307416407376e+71 Iteration: 14 2.3603265498975516e+76 Iteration: 15 2.599049318486855e+81 Iteration: 16 nan Iteration: 17 nan Iteration: 18 nan Iteration: 19 nan Iteration: 20 nan Iteration: 21 nan Iteration: 22 nan Iteration: 23 nan Iteration: 24 nan Iteration: 25 nan Iteration: 26 nan Iteration: 27 nan Iteration: 28 nan Iteration: 29 nan Iteration: 30 nan Iteration: 31 nan Iteration: 32 nan Iteration: 33 nan Iteration: 34 nan Iteration: 35 nan Iteration: 36 nan Iteration: 37 nan Iteration: 38 nan Iteration: 39 nan Iteration: 40 nan Iteration: 41 nan Iteration: 42 nan Iteration: 43 nan Iteration: 44 nan Iteration: 45 nan Iteration: 46 nan Iteration: 47 nan Iteration: 48 nan Iteration: 49 nan Iteration: 50 nan Iteration: 51 nan Iteration: 52 nan Iteration: 53 nan Iteration: 54 nan Iteration: 55 nan Iteration: 56 nan Iteration: 57 nan Iteration: 58 nan Iteration: 59 nan Iteration: 60 nan Iteration: 61 nan Iteration: 62 nan Iteration: 63 nan Iteration: 64 nan Iteration: 65 nan Iteration: 66 nan Iteration: 67 nan Iteration: 68 nan Iteration: 69 nan Iteration: 70 nan Iteration: 71 nan Iteration: 72 nan Iteration: 73 nan Iteration: 74 nan Iteration: 75 nan Iteration: 76 nan Iteration: 77 nan Iteration: 78 nan Iteration: 79 nan Iteration: 80 nan Iteration: 81 nan Iteration: 82 nan Iteration: 83 nan Iteration: 84 nan Iteration: 85 nan Iteration: 86 nan Iteration: 87 nan Iteration: 88 nan Iteration: 89 nan Iteration: 90 nan Iteration: 91 nan Iteration: 92 nan Iteration: 93 nan Iteration: 94 nan Iteration: 95 nan Iteration: 96 nan Iteration: 97 nan Iteration: 98 nan Iteration: 99 nan