I've started learning ML by following Andrew Ng's course on Coursera, and I'm trying to implement gradient descent with linear regression,
but I'm not sure what I'm missing. According to this,
I've tried to implement it, but something is wrong. Here is the code. It's worth pointing out that this is the first time I'm touching Python, without having learned the basics first.
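For reference, I believe these are the simultaneous update rules from the course that I'm trying to follow (writing them out to check I have them right):

$$
\theta_0 := \theta_0 - \alpha\,\frac{1}{m}\sum_{i=1}^{m}\bigl(h_\theta(x^{(i)}) - y^{(i)}\bigr),
\qquad
\theta_1 := \theta_1 - \alpha\,\frac{1}{m}\sum_{i=1}^{m}\bigl(h_\theta(x^{(i)}) - y^{(i)}\bigr)\,x^{(i)},
$$

where $h_\theta(x) = \theta_0 + \theta_1 x$, $\alpha$ is the learning rate, and both parameters are meant to be updated simultaneously.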
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
x = [1,2,3,4,5]
y = [1,2,3,4,5]
def Gradient_Descent(x, y, learning_rate, iterations):
    theta_1 = np.random.randint(low=2, high=5)
    theta_0 = np.random.randint(low=2, high=5)
    m = x.shape[0]

    def mean_error(a, b, factor):
        sum_mean = 0
        for i in range(m):
            sum_mean += (theta_0 + theta_1 * a[i]) - b[i]  # error term: h(x) - y, where h(x) = theta0 + theta1 * x
            if factor:
                sum_mean *= a[i]
        return sum_mean

    def perform_cal(theta_0, theta_1, m):
        temp_0 = theta_0 - learning_rate * ((1 / m) * mean_error(x, y, False))
        temp_1 = theta_1 - learning_rate * ((1 / m) * mean_error(x, y, True))
        return temp_0, temp_1

    fig = plt.figure()
    ax = fig.add_subplot(111)
    for i in range(iterations):
        theta_0, theta_1 = perform_cal(theta_0, theta_1, m)
        ax.clear()
        ax.plot(x, y, linestyle='None', marker='o')
        ax.plot(x, theta_0 + theta_1 * x)
        fig.canvas.draw()
x = np.array(x)
y = np.array(y)
Gradient_Descent(x, y, 0.1, 500)
input("Press enter to close program")
What am I doing wrong?