Neural Networks for Scientific Computing
Introduction to Neural Networks
Neural networks are powerful tools for solving complex problems in scientific computing and physics.
Basic Neural Network Structure
import tensorflow as tf
import numpy as np
# Create a simple neural network
def create_model():
    """Build and compile a small fully-connected regression network.

    Architecture: one input feature -> Dense(64, relu) -> Dense(32, relu)
    -> Dense(1) linear output, compiled with the Adam optimizer and
    mean-squared-error loss.

    Returns:
        A compiled ``tf.keras.Sequential`` model.
    """
    model = tf.keras.Sequential([
        # Explicit Input layer: passing ``input_shape=`` to the first Dense
        # layer is deprecated in modern Keras; this form is equivalent.
        tf.keras.Input(shape=(1,)),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(32, activation='relu'),
        tf.keras.layers.Dense(1),
    ])
    model.compile(optimizer='adam', loss='mse')
    return model
# Generate training data: noisy samples of y = x^2.
# Dense layers expect 2-D input (samples, features), so reshape to (1000, 1);
# a bare (1000,) array would fail the layer's min_ndim=2 input check.
x = np.linspace(-5, 5, 1000).reshape(-1, 1)
y = x ** 2 + np.random.normal(0, 0.1, size=x.shape)
# Shuffle before fitting: ``validation_split`` holds out the *last* 20% of
# the data, which for a sorted linspace would make validation cover only
# x in [3, 5] — a biased hold-out set.
perm = np.random.permutation(len(x))
x, y = x[perm], y[perm]
# Train model
model = create_model()
model.fit(x, y, epochs=100, validation_split=0.2)
Advanced Architectures
Explore different neural network architectures for scientific applications.
Convolutional Neural Network
import tensorflow as tf
# Create a CNN for image analysis
def create_cnn_model():
    """Build and compile a small CNN regressor for 64x64 single-channel
    field images.

    Two Conv2D/MaxPooling2D stages downsample the input before a
    Flatten + Dense head produces one scalar output. Compiled with the
    Adam optimizer and mean-squared-error loss.

    Returns:
        A compiled ``tf.keras.Sequential`` model.
    """
    net = tf.keras.Sequential()
    net.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
                                   input_shape=(64, 64, 1)))
    net.add(tf.keras.layers.MaxPooling2D((2, 2)))
    net.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
    net.add(tf.keras.layers.MaxPooling2D((2, 2)))
    net.add(tf.keras.layers.Flatten())
    net.add(tf.keras.layers.Dense(64, activation='relu'))
    net.add(tf.keras.layers.Dense(1))
    net.compile(optimizer='adam', loss='mse')
    return net
# Generate 2D physics data (e.g., field distributions)
def generate_field_data(n_samples=1000):
    """Generate a batch of 2-D point-charge field maps on a 64x64 grid.

    Bug fixed: the original ignored ``n_samples`` entirely and always
    returned a single (64, 64, 1) field. Now the point charge is swept
    deterministically along the x-axis so each sample is distinct yet
    reproducible, and a full batch is returned — matching the
    (batch, 64, 64, 1) input the CNN above expects.

    Args:
        n_samples: Number of field maps to generate.

    Returns:
        float array of shape (n_samples, 64, 64, 1).
    """
    x = np.linspace(-5, 5, 64)
    y = np.linspace(-5, 5, 64)
    X, Y = np.meshgrid(x, y)
    # Charge x-positions: centered at 0 for a single sample, otherwise a
    # deterministic sweep across [-3, 3].
    if n_samples > 1:
        centers = np.linspace(-3.0, 3.0, n_samples)
    else:
        centers = np.zeros(n_samples)
    fields = np.empty((n_samples, 64, 64, 1), dtype=float)
    for i, cx in enumerate(centers):
        # Example: Electric field from a point charge at (cx, 0);
        # 1/r falloff, with epsilon to avoid division by zero at the charge.
        R = np.sqrt((X - cx) ** 2 + Y ** 2)
        fields[i, :, :, 0] = 1.0 / (R + 1e-6)
    return fields
Custom Loss Functions
Learn how to incorporate physical constraints into neural network training.
Physics-Constrained Loss
import tensorflow as tf
class PhysicsModel(tf.keras.Model):
    """Small MLP whose training loss adds a physics-residual penalty.

    The penalty enforces dy/dx + y ≈ 0 (the residual of y' = -y) on top of
    the usual data-fit MSE.

    Bug fixed: the original ``physics_loss`` called
    ``tf.gradients(y_pred, self.inputs)``, which (a) does not work in TF2
    eager execution and (b) references ``self.inputs``, an attribute that
    subclassed (non-functional) Keras models do not have. Input gradients
    must be taken with a ``tf.GradientTape`` that watched the inputs, so
    the derivative is now computed in ``train_step`` and passed to
    ``physics_loss`` explicitly.
    """

    # Relative weight of the physics residual vs. the data-fit MSE.
    PHYSICS_WEIGHT = 0.1

    def __init__(self):
        super().__init__()
        self.dense1 = tf.keras.layers.Dense(64, activation='relu')
        self.dense2 = tf.keras.layers.Dense(32, activation='relu')
        self.dense3 = tf.keras.layers.Dense(1)

    def call(self, inputs):
        """Forward pass: 64-relu -> 32-relu -> 1 linear."""
        x = self.dense1(inputs)
        x = self.dense2(x)
        return self.dense3(x)

    def physics_loss(self, y_true, y_pred, dy_dx=None):
        """MSE data term plus optional physics-residual penalty.

        Args:
            y_true: Target values.
            y_pred: Model predictions.
            dy_dx: Gradient of predictions w.r.t. the inputs, computed by
                the caller with a ``tf.GradientTape``. When ``None`` (e.g.
                when Keras invokes this as a plain ``loss`` callback, which
                has no access to the inputs), only the MSE term is returned.

        Returns:
            Scalar loss tensor.
        """
        mse = tf.reduce_mean(tf.square(y_true - y_pred))
        if dy_dx is None:
            return mse
        # Example: residual of the ODE dy/dx + y = 0.
        physics_constraint = tf.reduce_mean(tf.square(dy_dx + y_pred))
        return mse + self.PHYSICS_WEIGHT * physics_constraint

    def train_step(self, data):
        """Custom step: nested tapes give both input- and weight-gradients."""
        x, y_true = data
        with tf.GradientTape() as param_tape:
            with tf.GradientTape() as input_tape:
                input_tape.watch(x)  # x is a plain tensor, not a Variable
                y_pred = self(x, training=True)
            dy_dx = input_tape.gradient(y_pred, x)
            loss = self.physics_loss(y_true, y_pred, dy_dx)
        grads = param_tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        return {"loss": loss}
# Create and train model
model = PhysicsModel()
# NOTE(review): Keras invokes a compiled loss as ``loss(y_true, y_pred)``;
# it has no access to the model inputs, so an input-gradient (physics)
# term cannot be evaluated inside such a callback — confirm intent.
# NOTE(review): despite the comment above, no ``model.fit`` call appears
# in this chunk; training may happen elsewhere or be missing.
model.compile(optimizer='adam', loss=model.physics_loss)
Practice Exercises
Exercise 1: PDE Solver
Build a neural network to solve the heat equation, a canonical partial differential equation (PDE), by training it to satisfy the equation's residual and boundary conditions.
Exercise 2: System Identification
Use neural networks to identify the underlying physics of a dynamical system from data.