import numpy as np
import pandas as pd
import tensorflow as tf
# Define the intercept and slope
intercept = tf.Variable(0.1, dtype=tf.float32)
slope = tf.Variable(0.1, dtype=tf.float32)
# Define a linear regression model
def linear_regression(intercept, slope, features):
    return intercept + features * slope  # y' = m*X + c
# Compute the predicted values and loss
def loss_function(intercept, slope, targets, features):
    predictions = linear_regression(intercept, slope, features)  # y' = m*X + c
    return tf.keras.losses.mse(targets, predictions)  # the error between y and y'
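# (Optional) quick sanity check of the model and loss on a tiny made-up batch.
# The arrays below are illustrative only; they are not taken from dataset.csv.
_X = np.array([1.0, 2.0, 3.0], np.float32)
_y = np.array([2.0, 4.0, 6.0], np.float32)
print(loss_function(intercept, slope, _y, _X).numpy())  # MSE for the initial intercept/slope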
# Define the optimizer used to update the parameters
# (Adam is assumed here; any gradient-based optimizer such as SGD also works)
opt = tf.keras.optimizers.Adam()

for j in range(1000):  # Do for 1000 iterations (can be considered epochs)
    for batch in pd.read_csv('dataset.csv', chunksize=100):  # Load the data in batches
        y = np.array(batch['target'], np.float32)
        X = np.array(batch['feature'], np.float32)
        # Run one optimization step to minimize the loss function w.r.t. intercept and slope
        opt.minimize(lambda: loss_function(intercept, slope, y, X), var_list=[intercept, slope])
    print(loss_function(intercept, slope, y, X).numpy())  # Print the reduced error after each iteration
print(intercept.numpy(), slope.numpy())  # Print the trained parameters
### Note: the opt.minimize() call can also be broken into explicit steps:
# with tf.GradientTape() as tape:
#     loss = loss_function(intercept, slope, y, X)
#
# gradients = tape.gradient(loss, [intercept, slope])
# opt.apply_gradients(zip(gradients, [intercept, slope]))
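### A minimal, self-contained sketch of that GradientTape alternative, assuming the same
### `intercept`, `slope`, `loss_function`, and `opt` defined above; one explicit training
### step could look like this (call it inside the batch loop as: loss = train_step(y, X)):
def train_step(y_batch, X_batch):
    with tf.GradientTape() as tape:
        loss = loss_function(intercept, slope, y_batch, X_batch)  # forward pass, records ops on the tape
    gradients = tape.gradient(loss, [intercept, slope])           # d(loss)/d(intercept), d(loss)/d(slope)
    opt.apply_gradients(zip(gradients, [intercept, slope]))       # apply the parameter updates
    return loss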