#!/bin/python3
# A small example of using the Keras library to create a neural network
# and train it on a data set created randomly using NumPy.
# Author: Dana Vrajitoru and ...
# Class: C462/B551/I400 Artificial Intelligence
# Updated: Fall 2025

import os
# The backend must be selected before Keras is imported; setting the
# environment variable after the import has no effect.
os.environ["KERAS_BACKEND"] = "tensorflow"

import keras
from keras.models import Sequential
from keras.layers import Dense
import numpy as np

#print(keras.__version__)

def test_sequential():
    # 1. Generate synthetic data
    # 1000 samples, 2 features
    X = np.random.rand(1000, 2) * 10
    print("Input: ", X)

    # Create labels based on a simple rule: if sum of features > 10,
    # class 1, else class 0
    y = (X[:, 0] + X[:, 1] > 10).astype(int)
    print("Output: ", y)

    # 2. Define the Keras model using the Sequential API
    model = Sequential()
    # Add a hidden layer with 10 neurons and ReLU activation
    model.add(Dense(10, input_dim=2, activation='relu'))
    # Add the output layer with 1 neuron (for binary classification)
    # and sigmoid activation
    model.add(Dense(1, activation='sigmoid'))

    # 3. Compile the model
    # Use binary cross-entropy for binary classification, Adam
    # optimizer, and accuracy as a metric
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])

    # 4. Train the model
    # Train for 50 epochs with a batch size of 32
    model.fit(X, y, epochs=50, batch_size=32,
              verbose=0)  # verbose=0 to suppress training output

    # 5. Evaluate the model
    loss, accuracy = model.evaluate(X, y, verbose=0)
    print(f"Model Accuracy: {accuracy*100:.2f}%")

    # 6. Make predictions
    # Predict on a few new samples
    new_samples = np.array([[1, 2], [8, 9], [3, 7], [1, 9]])
    predictions = model.predict(new_samples)
    print("\nPredictions for new samples:")
    for i, sample in enumerate(new_samples):
        # Convert probability to binary class
        predicted_class = int(predictions[i][0] > 0.5)
        print(f"Sample: {sample}, "
              f"Predicted probability: {predictions[i][0]:.2f}, "
              f"Predicted class: {predicted_class}")

if __name__ == '__main__':
    test_sequential()
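

# --- Optional sketch (not part of the original example) ---
# test_sequential() above evaluates the model on the same data it was
# trained on, which gives an optimistic accuracy estimate. The function
# below is a minimal, hedged sketch of the same workflow with a held-out
# test split done by plain NumPy slicing; the name
# test_sequential_with_split and the 80/20 split ratio are assumptions
# added for illustration. It is not called by default; invoke it from the
# __main__ block if desired.
def test_sequential_with_split():
    # Same synthetic data and labeling rule as in test_sequential()
    X = np.random.rand(1000, 2) * 10
    y = (X[:, 0] + X[:, 1] > 10).astype(int)

    # Hold out the last 20% of the samples as a test set
    split = int(0.8 * len(X))
    X_train, X_test = X[:split], X[split:]
    y_train, y_test = y[:split], y[split:]

    # Same architecture as in test_sequential()
    model = Sequential()
    model.add(Dense(10, input_dim=2, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])

    # Train only on the training portion
    model.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)

    # Accuracy on data the model has never seen during training
    loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
    print(f"Held-out test accuracy: {accuracy*100:.2f}%")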