# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
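# Optional follow-up (a sketch, not part of the original example): quantify how
# depth affects the fit by comparing the training error of the two trees.
from sklearn.metrics import mean_squared_error
print("train MSE, max_depth=2:", mean_squared_error(y, regr_1.predict(X)))
print("train MSE, max_depth=5:", mean_squared_error(y, regr_2.predict(X)))
# The deeper tree tracks the noisy training points more closely (lower training MSE),
# which is exactly the overfitting the plot above illustrates.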
# Separate features and target from a pandas DataFrame (df is assumed to exist)
X = df.drop(columns=["target"])
Y = df["target"]
# Split into training and testing set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2, random_state=1)
# Model
from sklearn.tree import DecisionTreeRegressor
regression_tree = DecisionTreeRegressor(criterion="squared_error")  # "squared_error" replaces the deprecated "mse" criterion
regression_tree.fit(X_train, Y_train)
# Alternative approach: Snap ML
# In contrast to scikit-learn's decision tree, Snap ML offers multi-threaded CPU/GPU training
from snapml import DecisionTreeRegressor as SnapDecisionTreeRegressor  # alias to avoid shadowing sklearn's class
snapml_dt_gpu = SnapDecisionTreeRegressor(max_depth=4, random_state=45, use_gpu=True)  # train on GPU
snapml_dt_cpu = SnapDecisionTreeRegressor(max_depth=8, random_state=45, n_jobs=4)      # train on 4 CPU threads
snapml_dt_cpu.fit(X_train, Y_train)
# Predict
y_pred = regression_tree.predict(X_test)
# Evaluate the model
from sklearn.metrics import mean_squared_error
regression_tree.score(X_test, Y_test) # R-squared
mean_squared_error(Y_test, y_pred) # MSE
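# Illustrative timing sketch (assumes snapml is installed; names come from the
# snippet above). Actual speedups depend on dataset size, thread count and hardware.
import time
t0 = time.time()
DecisionTreeRegressor(max_depth=8).fit(X_train, Y_train)                # scikit-learn, single-threaded
sklearn_time = time.time() - t0
t0 = time.time()
SnapDecisionTreeRegressor(max_depth=8, n_jobs=4).fit(X_train, Y_train)  # Snap ML, 4 CPU threads
snapml_time = time.time() - t0
print(f"sklearn: {sklearn_time:.2f}s  snapml: {snapml_time:.2f}s")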
# Import DecisionTreeRegressor from sklearn.tree
from sklearn.tree import DecisionTreeRegressor
# Instantiate dt
dt = DecisionTreeRegressor(max_depth=8,
                           min_samples_leaf=0.13,
                           random_state=3)
# Fit dt to the training set
dt.fit(X_train, y_train)
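# Possible next step (a sketch; assumes X_test / y_test came from an earlier
# train_test_split): evaluate the fitted tree with RMSE.
from sklearn.metrics import mean_squared_error
y_pred = dt.predict(X_test)
rmse_dt = mean_squared_error(y_test, y_pred) ** 0.5
print(f"Test set RMSE of dt: {rmse_dt:.2f}")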
# Decision tree learning algorithm for regression (PySpark MLlib)
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import DecisionTreeRegressor, DecisionTreeRegressionModel
df = spark.createDataFrame([
    (1.0, Vectors.dense(1.0)),
    (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
dt = DecisionTreeRegressor(maxDepth=2, varianceCol="variance")
model = dt.fit(df)
model.depth
# 1
model.numNodes
# 3
model.featureImportances
# SparseVector(1, {0: 1.0})
model.numFeatures
# 1
test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
model.transform(test0).head().prediction
# 0.0
test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
model.transform(test1).head().prediction
# 1.0
dtr_path = temp_path + "/dtr"  # temp_path: any writable directory (assumed to be defined)
dt.save(dtr_path)
dt2 = DecisionTreeRegressor.load(dtr_path)
dt2.getMaxDepth()
# 2
model_path = temp_path + "/dtr_model"
model.save(model_path)
model2 = DecisionTreeRegressionModel.load(model_path)
model.numNodes == model2.numNodes
# True
model.depth == model2.depth
# True
model.transform(test1).head().variance
# 0.0
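# On a real DataFrame the raw columns usually need to be assembled into a single
# "features" vector first. A minimal sketch (the column names "x1", "x2", "label"
# and the input DataFrame raw_df are hypothetical, not from the example above):
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.regression import DecisionTreeRegressor
assembler = VectorAssembler(inputCols=["x1", "x2"], outputCol="features")
tree = DecisionTreeRegressor(labelCol="label", featuresCol="features", maxDepth=5)
pipeline = Pipeline(stages=[assembler, tree])
train_df, test_df = raw_df.randomSplit([0.8, 0.2], seed=42)  # raw_df: hypothetical input DataFrame
pipeline_model = pipeline.fit(train_df)
predictions = pipeline_model.transform(test_df)
evaluator = RegressionEvaluator(labelCol="label", predictionCol="prediction", metricName="rmse")
print("RMSE:", evaluator.evaluate(predictions))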