Teradata Package for Python Function Reference | 20.00 - Random Forest - Teradata Package for Python - Look here for syntax, methods and examples for the functions included in the Teradata Package for Python.

Teradata® Package for Python Function Reference - 20.00

Deployment
VantageCloud
VantageCore
Edition
Enterprise
IntelliFlex
VMware
Product
Teradata Package for Python
Release Number
20.00
Published
March 2024
Language
English (United States)
Last Update
2024-04-10
dita:id
TeradataPython_FxRef_Enterprise_2000
Product Category
Teradata Vantage

This example demonstrates scoring in Vantage with PMMLPredict() using a Random Forest model exported in PMML format.

Setup

In [1]:
# Import required libraries
import tempfile
import getpass
from teradataml import PMMLPredict, DataFrame, load_example_data, create_context, \
db_drop_table, remove_context, save_byom, delete_byom, retrieve_byom, list_byom
from teradataml.options.configure import configure
In [2]:
# Create the connection to Vantage.
# Credentials are read interactively with getpass so nothing sensitive is
# hardcoded in (or saved into) the notebook.
con = create_context(host=getpass.getpass("Hostname: "), 
                     username=getpass.getpass("Username: "),
                     password=getpass.getpass("Password: "))
Hostname: ········
Username: ········
Password: ········

Load example data.

In [3]:
# Load the example data into Vantage and wrap it in a teradataml DataFrame.
load_example_data("byom", "iris_input")
iris_input = DataFrame("iris_input")

# Create 2 samples of input data - sample 1 will have 80% of total rows and sample 2 will have 20% of total rows. 
# NOTE(review): sample() draws rows randomly, so the train/test split (and the
# downstream scores shown in this notebook) will differ between runs.
iris_sample = iris_input.sample(frac=[0.8, 0.2])
iris_sample
Out[3]:
id sepal_length sepal_width petal_length petal_width species sampleid
59 6.6 2.9 4.6 1.3 2 1
80 5.7 2.6 3.5 1.0 2 1
120 6.0 2.2 5.0 1.5 3 1
101 6.3 3.3 6.0 2.5 3 1
17 5.4 3.9 1.3 0.4 1 1
61 5.0 2.0 3.5 1.0 2 1
38 4.9 3.6 1.4 0.1 1 2
78 6.7 3.0 5.0 1.7 2 2
141 6.7 3.1 5.6 2.4 3 1
40 5.1 3.4 1.5 0.2 1 1
In [4]:
# Build the training set: keep the sample-1 rows (80% of the data) and drop
# the bookkeeping "sampleid" column, which is not a model feature.
train_rows = iris_sample[iris_sample.sampleid == "1"]
iris_train = train_rows.drop("sampleid", axis=1)
iris_train
Out[4]:
id sepal_length sepal_width petal_length petal_width species
97 5.7 2.9 4.2 1.3 2
120 6.0 2.2 5.0 1.5 3
118 7.7 3.8 6.7 2.2 3
101 6.3 3.3 6.0 2.5 3
139 6.0 3.0 4.8 1.8 3
61 5.0 2.0 3.5 1.0 2
38 4.9 3.6 1.4 0.1 1
78 6.7 3.0 5.0 1.7 2
17 5.4 3.9 1.3 0.4 1
40 5.1 3.4 1.5 0.2 1
In [5]:
# Build the scoring set: keep the sample-2 rows (20% of the data) and drop
# the bookkeeping "sampleid" column, which is not needed for scoring.
test_rows = iris_sample[iris_sample.sampleid == "2"]
iris_test = test_rows.drop("sampleid", axis=1)
iris_test
Out[5]:
id sepal_length sepal_width petal_length petal_width species
117 6.5 3.0 5.5 1.8 3
57 6.3 3.3 4.7 1.6 2
110 7.2 3.6 6.1 2.5 3
141 6.7 3.1 5.6 2.4 3
49 5.3 3.7 1.5 0.2 1
76 6.6 3.0 4.4 1.4 2
108 7.3 2.9 6.3 1.8 3
26 5.0 3.0 1.6 0.2 1
32 5.4 3.4 1.5 0.4 1
80 5.7 2.6 3.5 1.0 2

Train the Random Forest model.

In [6]:
# Import required libraries.
import numpy as np
from sklearn import tree
from nyoka import skl_to_pmml
from sklearn.pipeline import Pipeline
from sklearn_pandas import DataFrameMapper
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
In [7]:
# Convert teradataml dataframe to pandas dataframe.
# features : names of the feature columns (every column except the target).
# target : name of the training-target column.
# NOTE(review): "traid_pd" looks like a typo for "train_pd"; left unchanged
# because the fit cell below references this exact name.
traid_pd = iris_train.to_pandas()
features = traid_pd.columns.drop('species')
target = 'species'
In [8]:
# Generate the Random forest model.
# Preprocessing: standard-scale the sepal measurements and mean-impute the
# petal measurements; then fit a 100-tree random forest classifier.
mean_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
feature_mapper = DataFrameMapper([
    (['sepal_length', 'sepal_width'], StandardScaler()),
    (['petal_length', 'petal_width'], mean_imputer),
])
rf_pipe_obj = Pipeline([
    ("mapping", feature_mapper),
    ("rfc", RandomForestClassifier(n_estimators=100)),
])
In [9]:
# Fit the preprocessing + random-forest pipeline on the training data.
rf_pipe_obj.fit(traid_pd[features], traid_pd[target])
Out[9]:
Pipeline(steps=[('mapping',
                 DataFrameMapper(drop_cols=[],
                                 features=[(['sepal_length', 'sepal_width'],
                                            StandardScaler()),
                                           (['petal_length', 'petal_width'],
                                            SimpleImputer())])),
                ('rfc', RandomForestClassifier())])

Save the model in PMML format.

In [10]:
# Create a temporary directory to hold the exported PMML file; it is removed
# automatically when temp_dir is cleaned up / garbage-collected.
temp_dir = tempfile.TemporaryDirectory()
model_file_path = "{}/iris_rf_class_model.pmml".format(temp_dir.name)
In [11]:
# Export the fitted sklearn pipeline to a PMML file (via nyoka) so it can be
# loaded into Vantage for in-database scoring.
skl_to_pmml(rf_pipe_obj, features, target, model_file_path)

Save the model in Vantage.

In [12]:
# Save the PMML Model in Vantage.
# Stores the file contents under model id 'pmml_random_forest_iris' in the
# table "byom_models" (created automatically if it does not exist).
save_byom("pmml_random_forest_iris", model_file_path, "byom_models")
Created the model table 'byom_models' as it does not exist.
Model is saved.

List the models from Vantage.

In [13]:
# List the PMML Model in Vantage.
# Shows every model id currently stored in the "byom_models" table.
list_byom("byom_models")
                                            model
model_id                                         
pmml_random_forest_iris  b'3C3F786D6C20766572...'

Retrieve the model from Vantage.

In [14]:
# Retrieve the model from table "byom_models", using the model id 'pmml_random_forest_iris'.
# modeldata is passed to PMMLPredict() below as its modeldata argument.
modeldata = retrieve_byom("pmml_random_forest_iris", "byom_models")

Set "configure.byom_install_location" to the database where BYOM functions are installed.

In [15]:
# Point the client at the database where the BYOM functions are installed;
# read interactively to avoid hardcoding an environment-specific name.
configure.byom_install_location = getpass.getpass("byom_install_location: ")
byom_install_location: ········

Score the model.

In [16]:
# Perform prediction using PMMLPredict() and the PMML model stored in Vantage.
# accumulate : input columns copied through to the output next to the
#              prediction columns.
# overwrite_cached_models='*' : presumably invalidates any previously cached
#              models so the freshly saved model is loaded — NOTE(review):
#              confirm '*' semantics against the PMMLPredict documentation.
result = PMMLPredict(
                    modeldata = modeldata,
                    newdata = iris_test,
                    accumulate = ['id', 'sepal_length', 'petal_length'],
                    overwrite_cached_models = '*',
                    )
In [17]:
# Print the SQL query that the PMMLPredict call executes in Vantage.
print(result.show_query())
SELECT * FROM "mldb".PMMLPredict(
	ON "MLDB"."ml__select__163423868255528" AS InputTable
	PARTITION BY ANY 
	ON (select model_id,model from "MLDB"."ml__filter__163423223025197") AS ModelTable
	DIMENSION
	USING
	Accumulate('id','sepal_length','petal_length')
	OverwriteCachedModel('*')
) as sqlmr
In [18]:
# Display the scoring output: the accumulated input columns, the predicted
# class, and a JSON report (here it carries per-class probabilities).
result.result
Out[18]:
id sepal_length petal_length prediction json_report
30 4.7 1.6 1 {"probability_1":1.0,"predicted_species":1,"probability_2":0.0,"probability_3":0.0}
57 6.3 4.7 2 {"probability_1":0.0,"predicted_species":2,"probability_2":0.9,"probability_3":0.1}
118 7.7 6.7 3 {"probability_1":0.0,"predicted_species":3,"probability_2":0.0,"probability_3":1.0}
34 5.5 1.4 1 {"probability_1":1.0,"predicted_species":1,"probability_2":0.0,"probability_3":0.0}
87 6.7 4.7 2 {"probability_1":0.0,"predicted_species":2,"probability_2":0.99,"probability_3":0.01}
148 6.5 5.2 3 {"probability_1":0.0,"predicted_species":3,"probability_2":0.0,"probability_3":1.0}
43 4.4 1.3 1 {"probability_1":1.0,"predicted_species":1,"probability_2":0.0,"probability_3":0.0}
56 5.7 4.5 2 {"probability_1":0.0,"predicted_species":2,"probability_2":1.0,"probability_3":0.0}
9 4.4 1.4 1 {"probability_1":0.99,"predicted_species":1,"probability_2":0.01,"probability_3":0.0}
80 5.7 3.5 2 {"probability_1":0.0,"predicted_species":2,"probability_2":1.0,"probability_3":0.0}

Cleanup.

In [19]:
# Delete the model from table "byom_models", using the model id 'pmml_random_forest_iris'.
delete_byom("pmml_random_forest_iris", "byom_models")
Model is deleted.
In [20]:
# Drop the models table created by save_byom above.
db_drop_table("byom_models")
Out[20]:
True
In [21]:
# Drop the input data table loaded by load_example_data above.
db_drop_table("iris_input")
Out[21]:
True
In [22]:
# One must run remove_context() to close the connection and garbage collect internally generated objects.
remove_context()
Out[22]:
True