# Very simple model with 1 input layer and 1 output layer
from tensorflow.keras.layers import Input, Dense, Concatenate, Add, Subtract, Multiply, Flatten, Embedding
input_tensor = Input(shape=(1,)) # One column
layer1 = Dense(1, name='Layer-1') # A Dense layer is a function that transforms input tensors into output tensors
layer1_output_tensor = layer1(input_tensor) # With 1 input neuron feeding 1 output neuron, y = m*x + b: 2 trainable parameters, weight m and bias b
from tensorflow.keras.models import Model
model = Model(input_tensor, layer1_output_tensor) # We are creating model using the flow of tensors in each sequence of layers
model.compile(optimizer='adam', loss='mae') # Compile model with loss function and optimizer
model.summary()
# Visualize the Keras model with plot_model
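# A minimal sketch, assuming pydot and graphviz are installed:
from tensorflow.keras.utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True)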
model.fit(X_train, y_train, batch_size=64, validation_split=.20, verbose=True) # Train
model.evaluate(X_test, y_test) # Test
### If a column holds high-cardinality categorical integers, use an Embedding layer to map each integer ID to learned float values
input_tensor = Input(shape=(1,)) # One column (Input returns a tensor, not an actual layer)
# input_dim = number of unique tokens/categories, input_length = length of the input sequence, output_dim = width of the dense float embedding vectors
layer1 = Embedding(input_dim=n_unique_categories, input_length=1, output_dim=1, name='Team-Strength-Lookup') # 1 value as input, 1 value as output
layer1_output_tensor = layer1(input_tensor)
# Embedding layers add a third dimension to the data, so we flatten the output back to 2D with a Flatten layer
layer2 = Flatten()
final_output_tensor = layer2(layer1_output_tensor)
# Creating a model with Model; wrapping the layers in a model lets us reuse it like a shared layer
model = Model(input_tensor, final_output_tensor) # Construct the flow of tensors with input tensor and output tensor
# Sharing the model
input_tensor_1 = Input((1,))
input_tensor_2 = Input((1,))
output_tensor_1 = model(input_tensor_1) # Takes in an input tensor; the whole model acts as a single layer
output_tensor_2 = model(input_tensor_2) # The same model applied to a second input, so its weights are shared
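# A sketch combining the two shared-model outputs, e.g. a rating difference via Subtract
# (score_diff and diff_model are illustrative names):
score_diff = Subtract()([output_tensor_1, output_tensor_2])
diff_model = Model([input_tensor_1, input_tensor_2], score_diff)
diff_model.compile(optimizer='adam', loss='mean_absolute_error')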
### Sharing single layer
input_tensor_1 = Input((1,))
input_tensor_2 = Input((1,))
shared_layer = Dense(1)
output_tensor_1 = shared_layer(input_tensor_1) # Takes in incoming tensors
output_tensor_2 = shared_layer(input_tensor_2) # Takes in incoming tensors
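# A sketch wrapping both branches in one model; shared_layer holds a single
# kernel and bias pair because both branches reuse the same weights
shared_model = Model([input_tensor_1, input_tensor_2], [output_tensor_1, output_tensor_2])
print(shared_layer.get_weights()) # One kernel and one bias, shared by both outputs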
### Create a model with merging layers
in_tensor_1 = Input((1,))
in_tensor_2 = Input((1,))
merge_layer = Concatenate() # There are also Add, Subtract, Multiply merging layers
out_tensor = merge_layer([in_tensor_1, in_tensor_2]) # Takes in a list of incoming tensors
model = Model(inputs=[in_tensor_1, in_tensor_2], outputs=out_tensor)
model.compile(optimizer='adam', loss='mean_absolute_error')
model.fit([data_1, data_2], target) # A list of arrays as X (one per input) and the target array as y; maps to model.fit(X, y)
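# Evaluation mirrors fit: pass a list of arrays, one per input (test_1, test_2, test_target are assumed names)
model.evaluate([test_1, test_2], test_target)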
in_tensor_1 = Input(shape=(1,))
in_tensor_2 = Input(shape=(1,))
in_tensor_3 = Input(shape=(1,))
out_tensor = Concatenate()([in_tensor_1, in_tensor_2, in_tensor_3])
output_tensor = Dense(1)(out_tensor)
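# A sketch completing the 3-input model; named model1 to match the stacking example below (an assumption)
model1 = Model(inputs=[in_tensor_1, in_tensor_2, in_tensor_3], outputs=output_tensor)
model1.compile(optimizer='adam', loss='mean_absolute_error')
model1.fit([data_1, data_2, data_3], target) # data_1..data_3 and target are assumed arrays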
### Stacking models: requires 2 datasets. Train model1 on dataset1, then use model1 to predict outcomes for dataset2. Store model1's predictions as a column in dataset2, then train model2 on that prediction column plus the other numeric features to produce the final output
pred = model1.predict([in_data_1, in_data_2, in_data_3]) # model1 takes 3 inputs, passed as a list of arrays
df['pred'] = pred # Store model1's prediction as a new column in dataset2
df[['home', 'seed_diff', 'pred']] # Features for model2: original numerics plus model1's prediction
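# Hypothetical sketch of model2 trained on the 3 stacked features; 'final_target' is an assumed column name
in_tensor = Input(shape=(3,))
out_tensor = Dense(1)(in_tensor)
model2 = Model(in_tensor, out_tensor)
model2.compile(optimizer='adam', loss='mean_absolute_error')
model2.fit(df[['home', 'seed_diff', 'pred']], df['final_target'])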
# Fitting model with 2 outputs
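# Minimal sketch of the 2-output model (assumed; not defined in the original notes): one Dense layer with 2 units
input_tensor = Input(shape=(1,))
output_tensor = Dense(2)(input_tensor)
model = Model(input_tensor, output_tensor)
model.compile(optimizer='adam', loss='mean_absolute_error')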
X = df[['seed_diff']]
y = df[['score_1', 'score_2']] # 2 columns for 2 outputs
model.fit(X, y, epochs=500)
model.get_weights() # See model weights and biases
# A regressor-classifier: stack a classification layer (sigmoid activation) on top of a regression layer. Data flows through the regression layer first, and the sigmoid classification layer converts its output into a class probability
# Best use case: when the class probability should follow directly from the regression output (e.g., win probability derived from a predicted score difference)
input_tensor = Input(shape=(1,))
regression_layer = Dense(1)
regression_tensor = regression_layer(input_tensor)
classification_layer = Dense(1, activation='sigmoid')
classification_tensor = classification_layer(regression_tensor)
model = Model(input_tensor, [regression_tensor, classification_tensor]) # The 2 output layers we want predictions from go in a list
model.compile(loss=['mean_absolute_error', 'binary_crossentropy'], optimizer='adam') # 2 loss functions, one per output layer
X = df[['abc_feature']]
y_reg = df[['reg_col']]
y_class = df[['class_col']]
model.fit(X, [y_reg, y_class], epochs=100)
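# predict on a multi-output model returns one array per output: [regression predictions, class probabilities]
pred_reg, pred_class = model.predict(X)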
# Manually reproduce the classification probability from the learned weights and biases
weights = model.get_weights() # Numpy arrays in layer order: [reg_kernel, reg_bias, class_kernel, class_bias]
from scipy.special import expit as sigmoid
weight, bias = weights[2][0][0], weights[3][0] # The classification layer's weight and bias
print(sigmoid(1 * weight + bias)) # Probability when the regression layer outputs 1