import streamlit as st
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import yaml
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import sys
import os
# Add current directory to path to import local modules
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
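# The star import below is assumed to provide the classes used throughout this demo
# (Scenario, DatasetCreator, Simulator, NeuralNetworkCreator, DataLoader, DefaultDict, ...)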
from trainer import *
# Set page config
st.set_page_config(
page_title="Neural Inventory Control Demo",
page_icon="📦",
layout="wide",
initial_sidebar_state="expanded"
)
@st.cache_data
def load_configuration():
"""Load configuration files"""
try:
config_setting_file = 'config_files/settings/many_warehouses_real_data_lost_demand.yml'
config_hyperparams_file = 'config_files/policies_and_hyperparams/gnn.yml'
with open(config_setting_file, 'r') as file:
config_setting = yaml.safe_load(file)
with open(config_hyperparams_file, 'r') as file:
config_hyperparams = yaml.safe_load(file)
return config_setting, config_hyperparams
except Exception as e:
st.error(f"Error loading configuration: {e}")
return None, None
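# cache_resource keeps non-serializable objects (model, data loader, simulator) alive
# across Streamlit reruns instead of rebuilding them on every interaction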
@st.cache_resource
def initialize_model_and_data():
"""Initialize model and data loaders"""
try:
config_setting, config_hyperparams = load_configuration()
if config_setting is None or config_hyperparams is None:
return None, None, None, None, None, None
# Extract configuration
setting_keys = 'seeds', 'test_seeds', 'problem_params', 'params_by_dataset', 'observation_params', 'store_params', 'warehouse_params', 'echelon_params', 'sample_data_params'
hyperparams_keys = 'trainer_params', 'optimizer_params', 'nn_params'
seeds, test_seeds, problem_params, params_by_dataset, observation_params, store_params, warehouse_params, echelon_params, sample_data_params = [
config_setting[key] for key in setting_keys
]
trainer_params, optimizer_params, nn_params = [config_hyperparams[key] for key in hyperparams_keys]
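# Missing observation keys should default to None; DefaultDict is assumed to come from the trainer star-import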
observation_params = DefaultDict(lambda: None, observation_params)
device = "cuda:0" if torch.cuda.is_available() else "cpu"
dataset_creator = DatasetCreator()
# Create scenario and datasets
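# If split_by_period is set, generate one long scenario and split it into train/dev/test
# along the time axis; otherwise generate separate samples and split them by index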
if sample_data_params['split_by_period']:
scenario = Scenario(
periods=None,
problem_params=problem_params,
store_params=store_params,
warehouse_params=warehouse_params,
echelon_params=echelon_params,
num_samples=params_by_dataset['train']['n_samples'],
observation_params=observation_params,
seeds=seeds
)
train_dataset, dev_dataset, test_dataset = dataset_creator.create_datasets(
scenario,
split=True,
by_period=True,
periods_for_split=[sample_data_params[k] for k in ['train_periods', 'dev_periods', 'test_periods']],
)
else:
scenario = Scenario(
periods=params_by_dataset['train']['periods'],
problem_params=problem_params,
store_params=store_params,
warehouse_params=warehouse_params,
echelon_params=echelon_params,
num_samples=params_by_dataset['train']['n_samples'] + params_by_dataset['dev']['n_samples'],
observation_params=observation_params,
seeds=seeds
)
train_dataset, dev_dataset = dataset_creator.create_datasets(scenario, split=True, by_sample_indexes=True, sample_index_for_split=params_by_dataset['dev']['n_samples'])
# Create data loaders
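# Cap the batch size at 4 so the interactive demo stays small and responsive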
train_loader = DataLoader(train_dataset, batch_size=min(4, params_by_dataset['train']['batch_size']), shuffle=True)
# Create model
model = NeuralNetworkCreator().create_neural_network(scenario, nn_params, device=device)
# Create simulator
simulator = Simulator(device=device)
return model, train_loader, simulator, problem_params, observation_params, params_by_dataset
except Exception as e:
st.error(f"Error initializing model and data: {e}")
return None, None, None, None, None, None
def display_input_data(sample_batch, observation, problem_params):
"""Display input data structure and values"""
st.markdown("### 📥 Input Data")
col1, col2 = st.columns(2)
with col1:
st.markdown("**🏢 System Configuration:**")
st.metric("Number of Stores", problem_params['n_stores'])
st.metric("Number of Warehouses", problem_params['n_warehouses'])
st.metric("Batch Size", sample_batch['demands'].shape[0])
with col2:
st.markdown("**📊 Data Dimensions:**")
for key, value in sample_batch.items():
if hasattr(value, 'shape'):
st.write(f"• **{key}**: {list(value.shape)}")
# Display observation structure
st.markdown("**🔍 Model Input (Observation):**")
obs_data = []
for key, value in observation.items():
if hasattr(value, 'shape'):
obs_data.append({
'Component': key,
'Shape': str(list(value.shape)),
'Description': get_component_description(key)
})
if obs_data:
df_obs = pd.DataFrame(obs_data)
st.dataframe(df_obs, use_container_width=True)
# Show sample values
st.markdown("**📋 Sample Values (First Item):**")
col1, col2, col3 = st.columns(3)
with col1:
if 'store_inventories' in observation:
store_inv = observation['store_inventories'][0].squeeze()
st.metric("Store Inventories", f"{store_inv.sum().item():.2f} (total)")
with col2:
if 'holding_costs' in observation:
holding_costs_tensor = observation['holding_costs'][0]
if holding_costs_tensor.numel() == 1:
holding_cost = holding_costs_tensor.item()
st.metric("Holding Cost", f"{holding_cost:.4f}")
else:
avg_holding_cost = holding_costs_tensor.mean().item()
st.metric("Avg Holding Cost", f"{avg_holding_cost:.4f}")
with col3:
if 'underage_costs' in observation:
underage_costs_tensor = observation['underage_costs'][0]
if underage_costs_tensor.numel() == 1:
underage_cost = underage_costs_tensor.item()
st.metric("Underage Cost", f"{underage_cost:.4f}")
else:
avg_underage_cost = underage_costs_tensor.mean().item()
st.metric("Avg Underage Cost", f"{avg_underage_cost:.4f}")
def get_component_description(key):
"""Get description for observation components"""
descriptions = {
'store_inventories': 'Current inventory levels at each store over lead time periods',
'warehouse_inventories': 'Current inventory levels at each warehouse',
'holding_costs': 'Cost per unit of inventory held',
'underage_costs': 'Penalty cost per unit of unmet demand',
'lead_times': 'Time delay between ordering and receiving inventory',
'current_period': 'Current time step in the simulation',
'demands': 'Historical and forecasted demand data'
}
return descriptions.get(key, 'Model input component')
def display_output_data(action, observation):
"""Display model output data"""
st.markdown("### 📤 Output Data (Ordering Decisions)")
col1, col2 = st.columns(2)
with col1:
st.markdown("**🎯 Action Structure:**")
action_data = []
for key, value in action.items():
if hasattr(value, 'shape'):
action_data.append({
'Action Type': key,
'Shape': str(list(value.shape)),
'Description': 'Inventory orders for ' + key
})
if action_data:
df_action = pd.DataFrame(action_data)
st.dataframe(df_action, use_container_width=True)
with col2:
st.markdown("**📊 Order Quantities:**")
if 'stores' in action:
store_orders = action['stores'][0].squeeze()
if store_orders.numel() == 1:
st.metric("Store Orders", f"{store_orders.item():.4f}")
else:
st.metric("Total Store Orders", f"{store_orders.sum().item():.4f}")
st.write(f"Per store: {store_orders[:5].tolist()}" + ("..." if len(store_orders) > 5 else ""))
if 'warehouses' in action and action['warehouses'] is not None:
warehouse_orders = action['warehouses'][0].squeeze()
if warehouse_orders.numel() == 1:
st.metric("Warehouse Orders", f"{warehouse_orders.item():.4f}")
else:
st.metric("Total Warehouse Orders", f"{warehouse_orders.sum().item():.4f}")
def create_inventory_visualization(all_observations, all_actions):
"""Create interactive visualizations for inventory decisions"""
st.markdown("### 📈 Inventory Decisions Over Time")
# Prepare data for visualization
periods = []
store_inventories = []
store_orders = []
warehouse_orders = []
for i, (obs, action) in enumerate(zip(all_observations, all_actions)):
periods.append(obs['period'] + 1)
# Store inventory
if obs['store_inventories'] is not None:
store_inv = obs['store_inventories'][:, 0].sum().item()
store_inventories.append(store_inv)
else:
store_inventories.append(0)
# Store orders
store_ord = action['stores'].sum().item()
store_orders.append(store_ord)
# Warehouse orders
if action['warehouses'] is not None:
warehouse_ord = action['warehouses'].sum().item()
warehouse_orders.append(warehouse_ord)
else:
warehouse_orders.append(0)
# Create subplot
fig = make_subplots(
rows=2, cols=2,
subplot_titles=('Store Inventory Levels', 'Store Orders', 'Warehouse Orders', 'Combined View'),
specs=[[{"secondary_y": False}, {"secondary_y": False}],
[{"secondary_y": False}, {"secondary_y": True}]]
)
# Store inventory levels
fig.add_trace(
go.Scatter(x=periods, y=store_inventories, name='Store Inventory', line=dict(color='blue')),
row=1, col=1
)
# Store orders
fig.add_trace(
go.Bar(x=periods, y=store_orders, name='Store Orders', marker_color='green'),
row=1, col=2
)
# Warehouse orders
fig.add_trace(
go.Bar(x=periods, y=warehouse_orders, name='Warehouse Orders', marker_color='orange'),
row=2, col=1
)
# Combined view
fig.add_trace(
go.Scatter(x=periods, y=store_inventories, name='Inventory', line=dict(color='blue')),
row=2, col=2
)
fig.add_trace(
go.Bar(x=periods, y=store_orders, name='Store Orders', marker_color='green', opacity=0.7),
row=2, col=2
)
fig.update_layout(height=600, showlegend=True, title_text="Neural Network Inventory Control Decisions")
st.plotly_chart(fig, use_container_width=True)
# Summary table
summary_data = {
'Period': periods,
'Store Inventory': [f"{inv:.2f}" for inv in store_inventories],
'Store Orders': [f"{ord:.2f}" for ord in store_orders],
'Warehouse Orders': [f"{ord:.2f}" for ord in warehouse_orders]
}
df_summary = pd.DataFrame(summary_data)
st.markdown("**📋 Detailed Summary:**")
st.dataframe(df_summary, use_container_width=True)
def run_simulation_demo(model, simulator, train_loader, problem_params, observation_params, params_by_dataset):
"""Run the main simulation demo"""
st.markdown("### 🚀 Simulation Demo")
# Get sample batch
sample_batch = next(iter(train_loader))
# Initialize simulator
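# Reset the environment for a short 5-period horizon so the demo finishes quickly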
with st.spinner("Initializing simulation..."):
observation, _ = simulator.reset(
periods=5,
problem_params=problem_params,
data=sample_batch,
observation_params=observation_params
)
# Display input data
display_input_data(sample_batch, observation, problem_params)
# Run simulation
st.markdown("### 🔄 Running the Simulation")
all_observations = []
all_actions = []
progress_bar = st.progress(0)
status_text = st.empty()
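# Roll the policy forward period by period, recording the observed state and the chosen orders at each step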
for period in range(5):
status_text.text(f'Running period {period + 1}/5...')
progress_bar.progress((period + 1) / 5)
# Store current state
all_observations.append({
'period': period,
'store_inventories': observation['store_inventories'][0].clone(),
'warehouse_inventories': observation['warehouse_inventories'][0].clone() if observation.get('warehouse_inventories') is not None else None
})
# Get model decision
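# Attach the simulator's internal bookkeeping to a shallow copy of the observation;
# the policy network receives both together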
observation_with_internal = {k: v for k, v in observation.items()}
observation_with_internal['internal_data'] = simulator._internal_data
with torch.no_grad():
action = model(observation_with_internal)
# Store the action
all_actions.append({
'period': period,
'stores': action['stores'][0].clone(),
'warehouses': action['warehouses'][0].clone() if action.get('warehouses') is not None else None
})
# Execute action
observation, reward, terminated, _, _ = simulator.step(action)
if terminated:
break
status_text.text('Simulation completed!')
# Display output for the last action
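# Re-add the batch dimension that was dropped when the per-period actions were stored above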
if all_actions:
last_action = {k: v.unsqueeze(0) if v is not None and hasattr(v, 'unsqueeze') else v for k, v in all_actions[-1].items() if k != 'period'}
display_output_data(last_action, observation)
# Create visualizations
create_inventory_visualization(all_observations, all_actions)
def main():
"""Main Streamlit app"""
st.markdown("# 📦 Neural Inventory Control Demo")
# Sidebar
st.sidebar.header("🛠️ Configuration")
st.sidebar.markdown("""
This demo showcases a Graph Neural Network (GNN) model for multi-warehouse inventory optimization using real retail data from the Favorita dataset.
""")
# Load model and data
with st.spinner("Loading model and data..."):
model, train_loader, simulator, problem_params, observation_params, params_by_dataset = initialize_model_and_data()
if model is None:
st.error("Failed to load model and data. Please check your configuration files and dependencies.")
return
st.success("✅ Model and data loaded successfully!")
# Model information
st.sidebar.markdown("### 📊 Model Information")
st.sidebar.info(f"""
- **Model Type**: Graph Neural Network (GNN)
- **Stores**: {problem_params['n_stores']}
- **Warehouses**: {problem_params['n_warehouses']}
- **Device**: {'GPU' if torch.cuda.is_available() else 'CPU'}
""")
# Main demo
tab1, tab2, tab3 = st.tabs(["🚀 Live Demo", "📖 About", "⚙️ Technical Details"])
with tab1:
st.markdown("""
🎯 What you'll see:
- Input Data: Current inventory levels, costs, demand forecasts
- Model Processing: Neural network analyzes the data
- Output Decisions: Inventory orders for stores and warehouses
- Visualization: Interactive charts showing decisions over time
""", unsafe_allow_html=True)
if st.button("🚀 Run Simulation Demo", type="primary"):
run_simulation_demo(model, simulator, train_loader, problem_params, observation_params, params_by_dataset)
with tab2:
st.markdown("""
## 📖 About This Demo
This demo showcases an AI-powered inventory control system that uses deep learning to optimize inventory decisions across multiple stores and warehouses.
### 🎯 Problem Being Solved
- **Multi-echelon inventory optimization**: Managing inventory across stores and warehouses
- **Demand uncertainty**: Handling unpredictable customer demand
- **Cost optimization**: Minimizing holding costs while avoiding stockouts
### 🧠 How It Works
1. **Input**: Current inventory levels, costs, lead times, demand forecasts
2. **AI Processing**: Graph Neural Network analyzes relationships between stores/warehouses
3. **Output**: Optimal inventory orders for each location
4. **Learning**: Model learns from real retail data (Favorita dataset)
### 📊 Key Features
- Real-time decision making
- Multi-location optimization
- Cost-aware ordering
- Demand forecasting integration
""")
with tab3:
st.markdown("""
## ⚙️ Technical Details
### 🏗️ Architecture
- **Model**: Graph Neural Network (GNN)
- **Framework**: PyTorch
- **Environment**: Gymnasium (successor to OpenAI Gym)
- **Data**: Favorita retail dataset (Ecuador)
### 📈 Training Process
- **Loss Function**: Policy gradient with inventory costs
- **Optimizer**: Adam
- **Validation**: Time-series split
- **Metrics**: Total inventory cost, service level
### 🔧 Configuration
- Stores: 21 retail locations
- Warehouses: 3 distribution centers
- Time horizon: 52 weeks
- Batch size: Configurable
### 📁 Project Structure
```
Neural_inventory_control/
├── main_run.ipynb # Training notebook
├── trainer.py # Training logic
├── neural_networks.py # Model architectures
├── environment.py # Simulation environment
├── data_handling.py # Data processing
└── config_files/ # Configuration files
```
""")
if __name__ == "__main__":
main()