Installation of Packages#

First install packages like numpy, scikit-learn, matplotlib

!pip install numpy scikit-learn matplotlib

Complex Numbers#

A complex number \(z\) is represented by \(z = x + jy\), where \(x\) and \(y\) are real numbers.

# Build complex numbers with the complex(real, imag) constructor.
c1 = complex(0, 0)
print(c1)

x, y = 2, 3
c2 = complex(x, y)
print(c2)

Absolute value#

Absolute value of a complex number \(z = x + jy\) is obtained by the formula \(\sqrt{x^2 + y^2}\)

from math import sqrt

# |z| = sqrt(x^2 + y^2), computed by hand from the real and imaginary parts.
absc2 = sqrt(x * x + y * y)
print(absc2)

Another way is to use the built-in function `abs()`.

# abs() returns the modulus of a complex number directly — no manual formula.
absc2 = abs(c2)
print(absc2)

absc1 = abs(c1)
print(absc1)

Evenly spaced numbers#

import numpy as np

# np.linspace(start, stop, num) returns `num` evenly spaced samples over
# the closed interval [start, stop].
# FIX: removed a duplicated `import numpy as np` between the two calls.
np.linspace(1.0, 5.0, num=5)
np.linspace(1.0, 5.0, num=10)

Mean#

Arithmetic mean#

If \(a_1, a_2, \ldots, a_n\) are numbers, then the arithmetic mean is obtained by:

\[A=\frac{1}{n}\sum_{i=1}^n a_i=\frac{a_1+a_2+\cdots+a_n}{n}\]
# The floats 1.0 .. 100.0; their arithmetic mean is (1 + 100) / 2 = 50.5.
num = np.linspace(1.0, 100.0, num=100)

a = num.mean()
print(a)

Geometric Mean#

from scipy.stats import gmean

# Geometric mean of `num` (the values 1..100 defined above):
# the n-th root of the product of the values.
g = gmean(num)
print(g)

If \(a_1, a_2, \ldots, a_n\) are numbers, then the geometric mean is obtained by:

\[\left(\prod_{i=1}^n a_i\right)^\frac{1}{n} = \sqrt[n]{a_1 a_2 \cdots a_n}\]

If \(a_1, a_2, \ldots, a_n\) are numbers, then the harmonic mean is obtained by:

\[H = \frac{n}{\frac1{a_1} + \frac1{a_2} + \cdots + \frac1{a_n}} = \frac{n}{\sum\limits_{i=1}^n \frac1{a_i}} = \left(\frac{\sum\limits_{i=1}^n a_i^{-1}}{n}\right)^{-1}.\]

Harmonic Mean#

from scipy.stats import hmean

# Harmonic mean of `num`: n divided by the sum of the reciprocals.
h = hmean(num)
print(h)

Classification#

Understanding the IRIS dataset#

from sklearn.datasets import load_iris

data = load_iris()

# Numeric class labels alongside their species names
for class_id, class_name in enumerate(data.target_names):
    print(class_id, class_name)

# The four measured input features
for feature in data.feature_names:
    print(feature)

X = data.data
Y = data.target
print(f"Data size: {len(X)}")

# Dump every sample with its numeric label and species name
for x, y in zip(X, Y):
    print(f" {x}: {y} ({data.target_names[y]})")
from sklearn.model_selection import train_test_split

# Hold out a third of the samples for testing; fixing random_state makes
# the shuffle — and therefore the split — reproducible across runs.
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.33, random_state=42
)

for subset_name, subset in (("Training", X_train), ("Test", X_test)):
    print(f"{subset_name} data size: {len(subset)}")
from sklearn.metrics import ConfusionMatrixDisplay
import matplotlib.pyplot as plot


def display_confusion_matrix(classifier, title):
    """Plot a normalised confusion matrix for *classifier* on the test split.

    Uses the module-level X_test, Y_test and data (iris bunch);
    normalize="true" scales each row by its true-class count.
    """
    display = ConfusionMatrixDisplay.from_estimator(
        classifier,
        X_test,
        Y_test,
        display_labels=data.target_names,
        cmap=plot.cm.Blues,
        normalize="true",
    )
    display.ax_.set_title(title)
    plot.show()
## Classifier
from sklearn import svm, metrics

# Train and evaluate one SVM per kernel type on the iris split above.
for kernel in ["linear", "rbf", "poly"]:
    print("")
    classifier = svm.SVC(kernel=kernel, gamma=10)
    classifier.fit(X_train, Y_train)

    # X_test is already 2-D, so the original reshape was a no-op and is dropped.
    predicted_values = classifier.predict(X_test)

    # FIX: original concatenation was missing a space ("withlinear kernel").
    title = f"SVM classifier with {kernel} kernel"
    print(metrics.classification_report(Y_test, predicted_values))
    display_confusion_matrix(classifier, title)

Importing Packages#

We import the necessary packages

import numpy as np
from sklearn.linear_model import Perceptron
from sklearn import datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plot
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

Load Dataset#

We load the IRIS dataset.

# Load the IRIS dataset bundled with scikit-learn.
iris = datasets.load_iris()

Description of the Dataset#

Input features#

# Names of the four input features (the notebook cell displays the list).
iris.feature_names

Target feature#

# Names of the three target classes (the notebook cell displays the array).
iris.target_names

Verify number of records#

# Input and target arrays should have the same number of records.
for part, values in (("Input", iris.data), ("Target", iris.target)):
    print(f"Number of {part} Records: {len(values)}")

Visualizing the dataset#

x = iris.data
y = iris.target

# Scatter the samples in two 2-D feature planes, coloured by class:
# (sepal length, sepal width) and (petal length, petal width).
for i, j in ((0, 1), (2, 3)):
    plot.scatter(x[:, i], x[:, j], c=y, cmap=plot.cm.Set1, edgecolor="k")
    plot.xlabel(iris.feature_names[i])
    plot.ylabel(iris.feature_names[j])
    plot.show()
# Two 3-D views of the data over different feature triples, coloured by class.
for i, j, k in ((1, 2, 3), (0, 2, 3)):
    fig = plot.figure(figsize=(6, 6))
    ax = fig.add_subplot(projection="3d")

    ax.scatter(x[:, i], x[:, j], x[:, k], c=y, cmap=plot.cm.Set1, edgecolor="k")
    ax.set_xlabel(iris.feature_names[i])
    ax.set_ylabel(iris.feature_names[j])
    ax.set_zlabel(iris.feature_names[k])
    plot.show()

Training#

x = iris.data
y = iris.target

# 70/30 stratified split; random_state fixes the shuffle for reproducibility,
# stratify=y keeps the class proportions equal in both subsets.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, train_size=0.7, random_state=12, stratify=y
)
print(f"Number of Training Records (input): {len(x_train)}")
print(f"Number of Training Records (target): {len(y_train)}")

print(f"Number of Test Records (input): {len(x_test)}")
# FIX: the original printed the "(input)" line twice; report the target count.
print(f"Number of Test Records (target): {len(y_test)}")

Standardization of features#

# Standardise features to zero mean / unit variance, fitting the scaler on
# the training split only so no test-set statistics leak into training.
sc = StandardScaler()
x_train_std = sc.fit_transform(x_train)
print(f"Mean: {sc.mean_} \nVariance={sc.var_}")
x_test_std = sc.transform(x_test)

# Perceptron with learning rate 0.1; random_state fixes the weight shuffling.
classifier = Perceptron(max_iter=100, eta0=0.1, random_state=12)
classifier.fit(x_train_std, y_train)

Classification report#

# Predict on the standardised test set, then print per-class
# precision / recall / F1.
predicted_target = classifier.predict(x_test_std)
report = metrics.classification_report(y_test, predicted_target)
print(report)

Confusion matrix#

# Plot the confusion matrix twice: normalised over predicted labels
# (columns), then over true labels (rows).
for norm in ("pred", "true"):
    cm = confusion_matrix(y_test, predicted_target, normalize=norm)
    disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=iris.target_names)
    disp.plot(cmap=plot.cm.Blues)

References#