3 Classification
1. ID3 ALGORITHM:[pg.no:45-47]
PROGRAM:
# Load libraries
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
# Load dataset (assumed to be the diabetes.csv file used in the KNN program)
df = pd.read_csv("diabetes.csv", header=None)
X = df.values[1:, 0:8]  # Features
y = df.values[1:, 8]    # Target label
# Split dataset into training set and test set (70% training and 30% test)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3, random_state=1)
# Build a decision tree classifier with the entropy (ID3) criterion
clf_entropy = DecisionTreeClassifier(criterion='entropy',
                                     max_depth=3, max_features=None,
                                     max_leaf_nodes=None,
                                     min_samples_leaf=5,
                                     min_samples_split=2,
                                     min_weight_fraction_leaf=0.0,
                                     random_state=100, splitter='best')
clf_entropy = clf_entropy.fit(X_train, y_train)
y_pred_en = clf_entropy.predict(X_test)
print(y_pred_en)
# Visualise the trained decision tree
tree.plot_tree(clf_entropy)
plt.show()
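
Note: criterion='entropy' is what makes the tree above an ID3-style tree: at each node ID3 picks the attribute whose split yields the largest information gain. A minimal sketch of that calculation on a plain list of class labels (the helpers entropy and information_gain below are illustrative, not scikit-learn functions):

import numpy as np

def entropy(labels):
    # H(S) = -sum(p_i * log2(p_i)) over the classes present in S
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

def information_gain(parent_labels, child_groups):
    # Gain = H(parent) - weighted average of H(child) over the split groups
    n = len(parent_labels)
    remainder = sum(len(g) / n * entropy(g) for g in child_groups)
    return entropy(parent_labels) - remainder

labels = ['yes', 'yes', 'yes', 'no', 'no', 'no', 'no', 'yes']
left, right = labels[:4], labels[4:]
print("Parent entropy:", entropy(labels))
print("Information gain of this split:", information_gain(labels, [left, right]))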
2. NAIVE BAYES CLASSIFIER (CATEGORICAL DATA):
PROGRAM:
# Load libraries
from sklearn import preprocessing
from sklearn.naive_bayes import GaussianNB
# Training data for the buys_computer example (the standard AllElectronics
# 14-tuple table is assumed here for the attribute values)
age = ['youth', 'youth', 'middle_aged', 'senior', 'senior', 'senior', 'middle_aged',
       'youth', 'youth', 'senior', 'youth', 'middle_aged', 'middle_aged', 'senior']
income = ['high', 'high', 'high', 'medium', 'low', 'low', 'low',
          'medium', 'low', 'medium', 'medium', 'medium', 'high', 'medium']
student = ['no', 'no', 'no', 'no', 'yes', 'yes', 'yes',
           'no', 'yes', 'yes', 'yes', 'no', 'yes', 'no']
credit_rating = ['fair', 'excellent', 'fair', 'fair', 'fair', 'excellent', 'excellent',
                 'fair', 'fair', 'fair', 'excellent', 'excellent', 'fair', 'excellent']
buys_computer = ['no', 'no', 'yes', 'yes', 'yes', 'no', 'yes',
                 'no', 'yes', 'yes', 'yes', 'yes', 'yes', 'no']
# Encode each categorical attribute as integers
le = preprocessing.LabelEncoder()
age_encoded = le.fit_transform(age)
print(age_encoded)
income_encoded = le.fit_transform(income)
print(income_encoded)
student_encoded = le.fit_transform(student)
print(student_encoded)
credit_encoded = le.fit_transform(credit_rating)
print(credit_encoded)
label = le.fit_transform(buys_computer)
print(label)
# Combining age, income, student, and credit rating into a single list of tuples
features = list(zip(age_encoded, income_encoded, student_encoded, credit_encoded))
model = GaussianNB()
model.fit(features, label)
# Predict output for age=youth(2), income=medium(2), student=yes(1), credit_rating=fair(1)
predicted = model.predict([[2, 2, 1, 1]])
print("Predicted class:", predicted)
Output:
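
Because the buys_computer attributes are categorical, CategoricalNB from sklearn.naive_bayes matches the textbook Naive Bayes calculation more closely than GaussianNB. A small illustrative sketch with toy encoded values (the data below is made up for the example, not taken from the program above):

import numpy as np
from sklearn.naive_bayes import CategoricalNB

# Toy label-encoded features (columns: age, income, student, credit_rating);
# the values here are invented purely for illustration.
X = np.array([[2, 0, 0, 1],
              [2, 0, 0, 0],
              [0, 0, 0, 1],
              [1, 2, 0, 1],
              [1, 1, 1, 1],
              [1, 1, 1, 0]])
y = np.array([0, 0, 1, 1, 1, 0])          # buys_computer: 0 = no, 1 = yes

cnb = CategoricalNB()
cnb.fit(X, y)
print(cnb.predict([[2, 2, 1, 1]]))        # class for a new encoded tuple
print(cnb.predict_proba([[2, 2, 1, 1]]))  # per-class probabilities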
3. NAIVE BAYES CLASSIFIER (WINE DATASET):
PROGRAM:
# Load libraries
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
# Load dataset
wine = datasets.load_wine()
print("Features:", wine.feature_names)
print("Labels:", wine.target_names)
print(wine.data.shape)
print(wine.data[:5])
print(wine.target)
# Split dataset into training set and test set (70% training and 30% test)
X_train, X_test, y_train, y_test = train_test_split(wine.data, wine.target,
                                                    test_size=0.3, random_state=109)
# Train a Gaussian Naive Bayes classifier and predict the test set
gnb = GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
print("Predicted Labels:", y_pred)
Output:
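
The program above prints only the predicted labels; accuracy_score from sklearn.metrics gives a single number for how well they match the true labels. A short sketch using the same wine split (same test_size and random_state) so it pairs directly with the program:

from sklearn import datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

wine = datasets.load_wine()
X_train, X_test, y_train, y_test = train_test_split(
    wine.data, wine.target, test_size=0.3, random_state=109)
y_pred = GaussianNB().fit(X_train, y_train).predict(X_test)
# Fraction of test samples whose predicted label equals the true label
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))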
4. LINEAR KERNEL:[pg.no:59-61]
PROGRAM:
# Load libraries
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report
# Load dataset
column_names = ['sepal-length', 'sepal-width', 'petal-length',
                'petal-width', 'Class']
df = pd.read_csv("iris.csv", names=column_names)
X = df.drop('Class', axis=1)
y = df['Class']
# Split dataset into training set and test set (70% training and 30% test)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3, random_state=1)
# Train an SVM classifier with a linear kernel
clf = SVC(kernel='linear')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(y_pred)
# Calculate accuracy and print the classification report
print("Accuracy:", accuracy_score(y_test, y_pred) * 100)
print(classification_report(y_test, y_pred))
# Display the confusion matrix as a heatmap
cm = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
ax = sn.heatmap(cm, annot=True)
plt.show()
Output:
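
With kernel='linear' the fitted SVC exposes its separating-hyperplane weights directly. The sketch below uses scikit-learn's bundled iris data instead of iris.csv, purely for illustration:

from sklearn.datasets import load_iris
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
clf = SVC(kernel='linear').fit(X, y)
# One weight vector w and intercept b per one-vs-one pair of classes;
# each pairwise decision is based on the sign of w.x + b
print("Weight vectors (w):", clf.coef_)
print("Intercepts (b):", clf.intercept_)
print("Support vectors per class:", clf.n_support_)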
5. POLYNOMIAL KERNEL:[pg.no:61-63]
PROGRAM:
# Import libraries
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report
# Load dataset
colnames = ['sepal-length', 'sepal-width', 'petal-length',
            'petal-width', 'Class']
df = pd.read_csv("iris.csv", names=colnames)
X = df.drop('Class', axis=1)
y = df['Class']
# Split dataset into training set and test set (70% training and 30% test)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3, random_state=1)
# Train an SVM classifier with a polynomial kernel
clf = SVC(kernel='poly')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(y_pred)
# Calculate accuracy and print the classification report
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
print(classification_report(y_test, y_pred))
# Display the confusion matrix as a heatmap
confusion_matrix = pd.crosstab(y_test, y_pred,
                               rownames=['Actual'], colnames=['Predicted'])
ax = sn.heatmap(confusion_matrix, annot=True)
plt.show()
Output:
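
In scikit-learn the polynomial kernel is K(x, z) = (gamma * x.z + coef0)^degree, so degree is the main tuning knob. A small cross-validation sketch on the bundled iris data (the degree values are arbitrary choices for illustration):

from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
for degree in (2, 3, 4):
    scores = cross_val_score(SVC(kernel='poly', degree=degree), X, y, cv=5)
    print(f"degree={degree}: mean CV accuracy = {scores.mean():.3f}")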
6. RADIAL BASIS FUNCTION KERNEL:[pg.no:63-65]
PROGRAM:
# Load libraries
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report
# Load dataset
colnames = ['sepal-length', 'sepal-width', 'petal-length',
            'petal-width', 'Class']
dataset = pd.read_csv("iris.csv", names=colnames)
X = dataset.drop('Class', axis=1)
y = dataset['Class']
# Split dataset into training set and test set (70% training and 30% test)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3, random_state=1)
# Train an SVM classifier with a radial basis function (RBF) kernel
clf = SVC(kernel='rbf')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))
# Generate and display the confusion matrix as a heatmap
confusion_matrix = pd.crosstab(y_test, y_pred,
                               rownames=['Actual'], colnames=['Predicted'])
sn.heatmap(confusion_matrix, annot=True)
plt.show()
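
For the RBF kernel, K(x, z) = exp(-gamma * ||x - z||^2), performance depends mainly on gamma and the penalty parameter C. A short grid-search sketch on the bundled iris data (the grid values are illustrative choices, not part of the program above):

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
param_grid = {'C': [0.1, 1, 10], 'gamma': ['scale', 0.1, 1]}
search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=5)
search.fit(X, y)
print("Best parameters:", search.best_params_)
print("Best CV accuracy:", search.best_score_)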
7. K-NEAREST NEIGHBOURS:[pg.no:66-68]
PROGRAM:
# Load libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix
# Load dataset
df = pd.read_csv("diabetes.csv", header=None)
X = df.values[1:, 0:8]  # Features
y = df.values[1:, 8]    # Target label
# Split dataset into training set and test set (70% training and 30% test)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3, random_state=1)
# Train a KNN classifier with k = 5 and predict the test set
clf = KNeighborsClassifier(n_neighbors=5)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(y_pred)
# Print the confusion matrix and classification report from sklearn.metrics
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))
print("Classification Report:")
print(classification_report(y_test, y_pred))
# Compute the mean error for k = 1 to 39 to help choose a good value of k
error = []
for i in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train, y_train)
    pred_i = knn.predict(X_test)
    error.append(np.mean(pred_i != y_test))
# Plot the mean error against k
plt.figure(figsize=(12, 6))
plt.plot(range(1, 40), error, marker='o')
plt.xlabel('K Value')
plt.ylabel('Mean Error')
plt.show()
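
KNN is purely distance-based, so attributes on very different numeric scales can dominate the distance; standardising the features first often helps. A sketch comparing the two on scikit-learn's bundled breast-cancer data, chosen here only so the snippet runs without the CSV file:

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = load_breast_cancer(return_X_y=True)
raw = KNeighborsClassifier(n_neighbors=5)
scaled = make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=5))
# Compare 5-fold cross-validated accuracy with and without feature scaling
print("Unscaled CV accuracy:", cross_val_score(raw, X, y, cv=5).mean())
print("Scaled CV accuracy:  ", cross_val_score(scaled, X, y, cv=5).mean())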
8. RANDOM FOREST:[pg.no:70-71]
PROGRAM:
# Load libraries
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
# Load dataset
df = pd.read_csv("iris.csv")
colnames = ['sepal-length', 'sepal-width', 'petal-length',
            'petal-width', 'Class']
df.columns = colnames
X = df.drop('Class', axis=1)
y = df['Class']
# Split dataset into training set and test set (70% training and 30% test)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3, random_state=1)
# Train a random forest with 100 trees and predict the test set
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Predictions:", y_pred)
print("Classification Report:")
print(classification_report(y_test, y_pred))
print("Confusion Matrix:")
cm = confusion_matrix(y_test, y_pred)
print(cm)
# Display the confusion matrix as a heatmap
plt.figure(figsize=(8, 6))
sn.heatmap(cm, annot=True)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
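
A useful by-product of a fitted random forest is its feature_importances_ attribute, which scores how much each attribute contributed to the splits. A minimal sketch on the bundled iris data (used here only so the snippet is self-contained):

from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

iris = load_iris()
clf = RandomForestClassifier(n_estimators=100, random_state=1)
clf.fit(iris.data, iris.target)
# Importance scores sum to 1 across all features
for name, score in zip(iris.feature_names, clf.feature_importances_):
    print(f"{name}: {score:.3f}")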