How to Build a Convolutional Neural Network (CNN) for Multi-Class Satellite Image Classification

  • Description:
    This code implements a Convolutional Neural Network (CNN) to classify satellite images into four categories: cloudy, water, green, and desert. It preprocesses the dataset by resizing, normalizing, and splitting the images, then trains the model using Conv2D layers for feature extraction and dense layers for classification. The model's performance is evaluated using metrics like accuracy, F1-score, and a confusion matrix.
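  • Quick Usage Sketch:
    Once the model and MinMaxScaler from the sample code below have been trained and fitted, a single new image can be labelled as sketched here. This is only an illustrative sketch, not part of the original code: the helper name predict_single_image, the CLASS_NAMES list, and the example path are assumptions.

    import numpy as np
    from PIL import Image

    # Class order must match the label encoding used in the sample code (0..3).
    CLASS_NAMES = ["cloudy", "water", "green", "desert"]

    def predict_single_image(model, scaler, img_path):
        # Apply the same preprocessing as training: RGB, 224x224, min-max scaling
        image = Image.open(img_path).convert("RGB").resize((224, 224))
        arr = np.array(image).reshape(1, -1)               # flatten to match the fitted scaler
        arr = scaler.transform(arr).reshape(1, 224, 224, 3)
        probs = model.predict(arr)                         # softmax probabilities, shape (1, 4)
        return CLASS_NAMES[int(np.argmax(probs, axis=1)[0])]

    # Example call (hypothetical image path):
    # print(predict_single_image(model, scaler, "/path/to/new_image.jpg"))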
Step-by-Step Process
  • Load Libraries:
    Load necessary libraries for image processing, data handling, and building deep learning models.
  • Retrieve and Visualize Data:
    Retrieve satellite images from the class folders, assign category labels, and visualize random samples (an explicit folder-to-label mapping is sketched after this list).
  • Preprocess Images:
    Resize, normalize, and label images for training.
  • Split Data:
    Create training and testing datasets for evaluation.
  • Build CNN Model:
    Use Conv2D layers for feature extraction and dense layers for classification.
  • Evaluate and Analyze:
    Evaluate model performance using metrics like accuracy, F1-score, and confusion matrix.
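  • Note on Folder-to-Label Mapping:
    The sample code below assigns labels by the position of each class folder in os.listdir(path), and that order is not guaranteed to be the same on every system. A safer alternative is to map folder names to labels explicitly, as in the minimal sketch below; the folder names cloudy, water, green, and desert and the variables class_to_label and files_by_label are assumptions for illustration.

    import os

    path = "/home/soft12/Downloads/sample_dataset/Website/Dataset/data"
    # Assumed class folder names; adjust to the actual sub-folder names in the dataset
    class_to_label = {"cloudy": 0, "water": 1, "green": 2, "desert": 3}

    # Collect full file paths per label, independent of os.listdir() ordering
    files_by_label = {}
    for folder, label in class_to_label.items():
        folder_path = os.path.join(path, folder)
        files_by_label[label] = [os.path.join(folder_path, f) for f in os.listdir(folder_path)]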
Sample Source Code
  • # Import Necessary Libraries
    import numpy as np
    import os
    from PIL import Image
    import random
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import MinMaxScaler
    from sklearn.model_selection import train_test_split
    from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
    from tensorflow.keras.models import Model

    import warnings
    warnings.filterwarnings("ignore")

    from sklearn.metrics import (classification_report, confusion_matrix, accuracy_score,
                                 f1_score, recall_score, precision_score)

    # List the class folders inside the dataset directory
    path = "/home/soft12/Downloads/sample_dataset/Website/Dataset/data"
    files = os.listdir(path)

    # NOTE: this assumes files[0..3] are the cloudy, water, green and desert folders
    cloudy_files = os.listdir(path+'/'+files[0])
    water_files = os.listdir(path+'/'+files[1])
    green_files = os.listdir(path+'/'+files[2])
    desert_files = os.listdir(path+'/'+files[3])

    # Plot five random sample images from one class folder
    def plot_images(images, paths, folder):
        paths = paths+'/'+folder
        sample_images = random.sample(images, 5)
        plt.figure(figsize=(15, 10))
        for i, img_file in enumerate(sample_images):
            img_path = os.path.join(paths, img_file)
            img = Image.open(img_path)
            plt.subplot(1, 5, i+1)
            plt.imshow(img)
            plt.axis('off')
            plt.title(f"Image {i+1}")
        plt.tight_layout()
        plt.show()

    plot_images(cloudy_files, path, files[0])
    plot_images(water_files, path, files[1])
    plot_images(green_files, path, files[2])
    plot_images(desert_files, path, files[3])

    # Convert up to 250 images per class into 224x224 RGB arrays
    def image_process(path, images, folder):
        path = path+'/'+folder
        final_feature = []
        for i in images[:250]:
            image = Image.open(path+'/'+i)
            if image.mode != 'RGB':
                image = image.convert('RGB')
            resize_img = image.resize((224, 224))
            img_array = np.array(resize_img)
            final_feature.append(img_array)
        return np.array(final_feature)

    cloudy_features = image_process(path, cloudy_files, files[0])
    water_features = image_process(path, water_files, files[1])
    green_features = image_process(path, green_files, files[2])
    desert_features = image_process(path, desert_files, files[3])

    # Stack the features and build integer labels for each class
    x = np.concatenate((cloudy_features, water_features, green_features, desert_features), axis=0)
    y_cloudy = [0 for i in range(len(cloudy_features))]
    y_water = [1 for i in range(len(water_features))]
    y_green = [2 for i in range(len(green_features))]
    y_desert = [3 for i in range(len(desert_features))]
    y = np.concatenate((y_cloudy, y_water, y_green, y_desert), axis=0)

    # Min-max scale the flattened pixels, then restore the image shape
    x_reshaped = x.reshape(x.shape[0], -1)
    scaler = MinMaxScaler()
    x_scaled = scaler.fit_transform(x_reshaped)
    x_scaled_reshaped = x_scaled.reshape(x.shape[0], x.shape[1], x.shape[2], x.shape[3])

    # Split into training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(x_scaled_reshaped, y, test_size=.2, random_state=42)

    # Define the CNN: two Conv2D/MaxPooling2D blocks followed by dense layers
    def Conv2d_model(input_shape, num_classes):
        inputs = Input(shape=input_shape)
        x = Conv2D(64, (3, 3), activation='relu')(inputs)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Conv2D(128, (3, 3), activation='relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Flatten()(x)
        x = Dense(64, activation='relu')(x)
        x = Dense(32, activation='relu')(x)
        outputs = Dense(num_classes, activation='softmax')(x)
        model = Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        return model

    num_classes = 4
    input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3])
    model = Conv2d_model(input_shape, num_classes)
    model.summary()

    # Train the model
    history = model.fit(X_train, y_train, batch_size=16, epochs=10, validation_data=(X_test, y_test))

    # Evaluate: convert softmax probabilities to class indices before scoring
    y_pred = model.predict(X_test)
    y_pred_classes = np.argmax(y_pred, axis=1)
    print(classification_report(y_test, y_pred_classes))
    print(confusion_matrix(y_test, y_pred_classes))
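  • Optional Evaluation Sketch:
    The imports at the top already include accuracy_score, f1_score, precision_score, and recall_score, but the sample code only prints the classification report and confusion matrix. The following is a minimal follow-up sketch, assuming the history, y_test, and y_pred_classes variables produced by the code above; the weighted averaging is a choice made here for illustration.

    import matplotlib.pyplot as plt
    from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

    # Aggregate scores on the test set
    acc = accuracy_score(y_test, y_pred_classes)
    f1 = f1_score(y_test, y_pred_classes, average="weighted")
    prec = precision_score(y_test, y_pred_classes, average="weighted")
    rec = recall_score(y_test, y_pred_classes, average="weighted")
    print(f"Accuracy: {acc:.4f}  F1: {f1:.4f}  Precision: {prec:.4f}  Recall: {rec:.4f}")

    # Training curves from the Keras History object returned by model.fit()
    plt.figure(figsize=(10, 4))
    plt.subplot(1, 2, 1)
    plt.plot(history.history["accuracy"], label="train")
    plt.plot(history.history["val_accuracy"], label="validation")
    plt.title("Accuracy per epoch")
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.plot(history.history["loss"], label="train")
    plt.plot(history.history["val_loss"], label="validation")
    plt.title("Loss per epoch")
    plt.legend()
    plt.tight_layout()
    plt.show()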

Screenshots
  • CNN Output Screenshot