### Using TensorFlow for regression
import numpy as np
import pandas as pd
import tensorflow as tf
# Define the intercept and slope
intercept = tf.Variable(0.1, dtype=tf.float32) # Initial guess; this is a target of optimization
slope = tf.Variable(0.1, dtype=tf.float32) # Initial guess; this is a target of optimization
opt = tf.keras.optimizers.Adam(learning_rate=0.01) # Optimizer used to minimize the loss function
# Define a linear regression model
def linear_regression(intercept, slope, features):
    return intercept + features*slope # y' = m*X + c
# Compute the predicted values and loss
def loss_function(intercept, slope, targets, features):
    predictions = linear_regression(intercept, slope, features) # y' = m*X + c
    return tf.keras.losses.mse(targets, predictions) # Mean squared error between y and y'
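## Quick sanity check (optional): evaluate the untrained loss on a few made-up points.
## The toy arrays below are assumptions, not part of the dataset used later.
X_toy = np.array([1., 2., 3.], np.float32)
y_toy = np.array([2., 4., 6.], np.float32) # roughly y = 2*X, so the loss should fall as slope approaches 2
print(loss_function(intercept, slope, y_toy, X_toy).numpy()) # large for the initial guess intercept = slope = 0.1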
for j in range(1000): # Do for 1000 iterations (can be treated as epochs)
    for batch in pd.read_csv('dataset.csv', chunksize=100): # Load the data in batches
        y = np.array(batch['target'], np.float32)
        X = np.array(batch['feature'], np.float32)
        opt.minimize(lambda: loss_function(intercept, slope, y, X), var_list=[intercept, slope]) # Run one optimization step to reduce the loss
    print(loss_function(intercept, slope, y, X)) # Prints the (shrinking) loss after each iteration
print(intercept.numpy(), slope.numpy()) # Print the trained parameters
### Note: the opt.minimize() call can also be broken up as
# with tf.GradientTape() as tape:
#     loss = loss_function(intercept, slope, y, X)
#
# gradients = tape.gradient(loss, [intercept, slope])
# opt.apply_gradients(zip(gradients, [intercept, slope]))
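## A runnable sketch of the GradientTape variant above, wrapped in a helper; the name
## train_step and the @tf.function decorator are additions here, not part of the original notes.
@tf.function
def train_step(targets, features):
    with tf.GradientTape() as tape:
        loss = loss_function(intercept, slope, targets, features)
    gradients = tape.gradient(loss, [intercept, slope])
    opt.apply_gradients(zip(gradients, [intercept, slope]))
    return loss
# Usage inside the batch loop: loss = train_step(y, X)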
### Using TensorFlow for a simple deep learning example
features = tf.Variable(tf.random.normal([50, 10])) # A dataset with 50 rows and 10 columns
weights = tf.Variable(tf.random.normal([10, 30])) # Weights must be shaped for matrix multiplication (input columns x hidden neurons): 10 input neurons feed a chosen 30 hidden-layer neurons
bias = tf.Variable(tf.zeros([30])) # The bias needs one entry per hidden neuron, i.e. the same length as the columns of matmul(features, weights)
opt = tf.keras.optimizers.RMSprop(learning_rate=0.01, momentum=0.9) # Optimizer used to minimize the loss function
def model(bias, weights, features):
    product = tf.matmul(features, weights)
    return tf.keras.activations.sigmoid(product + bias) # Forward propagation with an activation function; returns y'
def loss_function(bias, weights, targets, features):
    predictions = model(bias, weights, features) # Calculating y'
    return tf.keras.losses.binary_crossentropy(targets, predictions) # Error between y and y'
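## Quick shape check of the forward pass, using the random tensors defined above (illustrative only):
print(model(bias, weights, features).shape) # (50, 30): one 30-unit sigmoid activation per row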
for j in range(1000): # Do for 1000 iterations (can be treated as epochs)
    for batch in pd.read_csv('dataset.csv', chunksize=100): # Load the data in batches
        y = np.array(batch['target'], np.float32)
        X = np.array(batch.drop(columns=['target']), np.float32) # All 10 feature columns, so the shape matches the (10, 30) weights
        opt.minimize(lambda: loss_function(bias, weights, y, X), var_list=[bias, weights]) # Run one optimization step to reduce the loss
    print(loss_function(bias, weights, y, X)) # Prints the (shrinking) loss after each iteration
print(bias.numpy(), weights.numpy()) # Print the trained parameters
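## For comparison, a minimal sketch of the same kind of network with the high-level Keras API.
## The 1-unit output layer is an assumption added here (typical for binary targets); it is not
## part of the low-level example above.
model_keras = tf.keras.Sequential([
    tf.keras.Input(shape=(10,)),                      # 10 input features
    tf.keras.layers.Dense(30, activation='sigmoid'),  # 30 hidden sigmoid units (as above)
    tf.keras.layers.Dense(1, activation='sigmoid')    # assumed single-unit output for binary crossentropy
])
model_keras.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.01, momentum=0.9),
                    loss='binary_crossentropy')
# model_keras.fit(X, y, epochs=10, batch_size=100)    # example usage on the arrays loaded above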
### Example of creating multiple Dense layers sequentially
features = np.array([[2., 2., 43.]], dtype=np.float32) # The inputs; these should be constants (tf.constant(features) also works), not Variables
bias1 = tf.Variable(1.0) # Initialize bias1; it should have one entry per hidden-layer neuron (or a broadcastable scalar, as here); tf.Variable(tf.ones([n_units])) also works
weights1 = tf.ones((3, 2)) # Initialize weights1 as a 3x2 tensor of ones (3 incoming input neurons, 2 hidden-layer neurons)
product1 = tf.matmul(features, weights1) # Perform matrix multiplication of features and weights1
dense1 = tf.keras.activations.sigmoid(product1 + bias1) # Apply sigmoid activation function to product1 + bias1
print("\n dense1's output shape: {}".format(dense1.shape)) # Print shape of dense1
bias2 = tf.Variable(1.0) # Initialize bias2
weights2 = tf.ones((2, 1)) # Initialize weights2 as a 2x1 tensor of ones (previous layer 2 neurons, final layer 1 neuron)
product2 = tf.matmul(dense1, weights2) # Perform matrix multiplication of dense1 and weights2
prediction = tf.keras.activations.sigmoid(product2 + bias2) # Apply activation to product2 + bias2 and print the prediction
print('\n prediction: {}'.format(prediction.numpy()[0,0]))
print('\n actual: 1')
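## The same two-layer forward pass with tf.keras.layers.Dense, for comparison. These layers
## start from random weights rather than ones, and the names below are assumptions.
dense_layer1 = tf.keras.layers.Dense(2, activation='sigmoid') # 3 inputs -> 2 hidden sigmoid units
output_layer = tf.keras.layers.Dense(1, activation='sigmoid') # 2 hidden units -> 1 output unit
prediction2 = output_layer(dense_layer1(features)).numpy()    # shape (1, 1), like the manual version
print('\n prediction (Dense layers): {}'.format(prediction2[0, 0]))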
# Error for the first five examples (assumes df holds the full dataset and prediction was computed for at least five rows)
actual = df["target"].values
error = actual[:5] - prediction.numpy()[:5]
## NOTE: Also see CUSTOM TENSORFLOW MODEL and TENSORFLOW BASICS