
@AhmedCoolProjects
Last active December 3, 2025 20:01
gcn.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyMBVgZrUrJethoUF/5Gf2Id",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/AhmedCoolProjects/6344dd2d104fc068d05cf50b5b08f828/gcn.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "9ru-uTBrygoy"
},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import torch.optim as optim\n",
"\n",
"class GCNLayer(nn.Module):\n",
" def __init__(self, in_feats, out_feats, use_relu=True):\n",
" super().__init__()\n",
" # Define the learnable weights here\n",
" self.linear = nn.Linear(in_feats, out_feats) # this will create W and b\n",
" self.use_relu = use_relu\n",
"\n",
" def forward(self, node_feats, adj_matrix):\n",
" # 1. transform\n",
" trans_feats = self.linear(node_feats)\n",
" # 2. normalization\n",
" I = torch.eye(adj_matrix.shape[0])\n",
" A_tilde = adj_matrix + I\n",
" D_tilde_sqrt = torch.diag(torch.pow(A_tilde.sum(dim=1), -0.5))\n",
" A_norm = D_tilde_sqrt @ A_tilde @ D_tilde_sqrt\n",
" # 3. message passing\n",
" message = A_norm @ trans_feats\n",
" # 4. update\n",
" if self.use_relu:\n",
" return F.relu(message)\n",
" return message\n"
]
},
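{
"cell_type": "markdown",
"metadata": {},
"source": [
"*Added note (not part of the original gist):* step 2 above is the symmetric normalization $\\hat{A} = \\tilde{D}^{-1/2}\\tilde{A}\\tilde{D}^{-1/2}$ with $\\tilde{A} = A + I$. The cell below is a minimal sanity check: it works the normalization out by hand for a 2-node graph and confirms the output shape of a `GCNLayer`."
]
},
{
"cell_type": "code",
"metadata": {},
"execution_count": null,
"outputs": [],
"source": [
"# Illustrative sanity check (added for clarity, not part of the original gist)\n",
"# Tiny 2-node graph: a single undirected edge between node 0 and node 1.\n",
"A = torch.tensor([[0., 1.],\n",
"                  [1., 0.]])\n",
"A_tilde = A + torch.eye(2)                 # add self-loops\n",
"deg = A_tilde.sum(dim=1)                   # degrees with self-loops: [2., 2.]\n",
"D_inv_sqrt = torch.diag(deg.pow(-0.5))     # D_tilde^{-1/2}\n",
"print(D_inv_sqrt @ A_tilde @ D_inv_sqrt)   # every entry is 0.5 for this graph\n",
"\n",
"# Shape check: 2 nodes with 3 input features -> 4 output features.\n",
"layer = GCNLayer(in_feats=3, out_feats=4)\n",
"print(layer(torch.randn(2, 3), A).shape)   # torch.Size([2, 4])\n"
]
},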
{
"cell_type": "code",
"source": [
"class GCN(nn.Module):\n",
" def __init__(self, input_dim, hidden_dim, output_dim):\n",
" super().__init__()\n",
" # Layer 1\n",
" self.gcn1 = GCNLayer(input_dim, hidden_dim, use_relu=True)\n",
" # Layer 2\n",
" self.gcn2 = GCNLayer(hidden_dim, output_dim, use_relu=False)\n",
"\n",
" def forward(self, node_feats, adj_matrix):\n",
" x = self.gcn1(node_feats, adj_matrix)\n",
" x = self.gcn2(x, adj_matrix)\n",
" return x\n"
],
"metadata": {
"id": "ahFWzCGAymtS"
},
"execution_count": 3,
"outputs": []
},
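{
"cell_type": "markdown",
"metadata": {},
"source": [
"*Added note (not part of the original gist):* a quick shape check of the two-layer model. Each node's feature vector goes from `input_dim` to `hidden_dim` and then to `output_dim` class logits, so the output has one row of logits per node. The random graph below is purely illustrative."
]
},
{
"cell_type": "code",
"metadata": {},
"execution_count": null,
"outputs": [],
"source": [
"# Illustrative shape check (added for clarity, not part of the original gist)\n",
"demo_model = GCN(input_dim=3, hidden_dim=4, output_dim=2)\n",
"demo_feats = torch.randn(5, 3)                       # 5 nodes, 3 features each\n",
"demo_adj = (torch.rand(5, 5) > 0.5).float()\n",
"demo_adj = ((demo_adj + demo_adj.T) > 0).float()     # make the graph undirected\n",
"demo_adj.fill_diagonal_(0)                           # self-loops are added inside GCNLayer\n",
"print(demo_model(demo_feats, demo_adj).shape)        # torch.Size([5, 2])\n"
]
},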
{
"cell_type": "code",
"source": [
"# --- Dummy Data ---\n",
"# 4 Nodes, 3 Features each\n",
"node_features = torch.tensor([\n",
" [1.0, 0.0, 0.0], # Node 0\n",
" [0.0, 1.0, 0.0], # Node 1\n",
" [0.0, 0.0, 1.0], # Node 2\n",
" [1.0, 1.0, 0.0] # Node 3\n",
"])\n",
"\n",
"# Adjacency Matrix (4x4)\n",
"adj_matrix = torch.tensor([\n",
" [0, 1, 1, 0],\n",
" [1, 0, 1, 0],\n",
" [1, 1, 0, 1],\n",
" [0, 0, 1, 0]\n",
"], dtype=torch.float32)\n",
"\n",
"# Labels: We want to classify the nodes into Class 0 or Class 1\n",
"# Let's say Node 0 & 1 are Class 0, Node 2 & 3 are Class 1\n",
"labels = torch.tensor([0, 0, 1, 1])\n",
"\n",
"# --- Initialize Model ---\n",
"# Input: 3 features -> Hidden: 4 features -> Output: 2 classes\n",
"model = GCN(input_dim=3, hidden_dim=4, output_dim=2)\n",
"\n",
"# --- Optimizer & Loss ---\n",
"# Optimizer handles the parameter updates (Gradient Descent)\n",
"optimizer = optim.Adam(model.parameters(), lr=0.01)\n",
"# CrossEntropyLoss is standard for classification\n",
"criterion = nn.CrossEntropyLoss()\n",
"\n",
"print(\"Starting Training...\\n\")\n",
"\n",
"for epoch in range(100): # Run for 100 loops\n",
" model.train() # Set model to training mode\n",
"\n",
" # 1. Zero Gradients\n",
" # Clear old gradients from the previous step\n",
" optimizer.zero_grad()\n",
"\n",
" # 2. Forward Pass\n",
" # Get predictions from the model\n",
" output = model(node_features, adj_matrix)\n",
"\n",
" # 3. Calculate Loss\n",
" # Compare output with actual labels\n",
" loss = criterion(output, labels)\n",
"\n",
" # 4. Backward Pass (The Math Magic)\n",
" # PyTorch calculates gradients (dLoss/dW) for all weights automatically\n",
" loss.backward()\n",
"\n",
" # 5. Update Weights\n",
" # Adjust weights: W_new = W_old - (lr * gradient)\n",
" optimizer.step()\n",
"\n",
" if epoch % 10 == 0:\n",
" print(f\"Epoch {epoch} | Loss: {loss.item():.4f}\")\n",
"\n",
"# Final Prediction\n",
"print(\"\\nFinal Node Classifications:\")\n",
"final_output = model(node_features, adj_matrix)\n",
"predicted_classes = final_output.argmax(dim=1)\n",
"print(f\"Predicted: {predicted_classes}\")\n",
"print(f\"Actual: {labels}\")\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "LmrcAT_7ypw4",
"outputId": "bbd9408b-b928-4b32-94c0-1c1c07243d8f"
},
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Starting Training...\n",
"\n",
"Epoch 0 | Loss: 0.6901\n",
"Epoch 10 | Loss: 0.6806\n",
"Epoch 20 | Loss: 0.6728\n",
"Epoch 30 | Loss: 0.6637\n",
"Epoch 40 | Loss: 0.6526\n",
"Epoch 50 | Loss: 0.6377\n",
"Epoch 60 | Loss: 0.6215\n",
"Epoch 70 | Loss: 0.6020\n",
"Epoch 80 | Loss: 0.5817\n",
"Epoch 90 | Loss: 0.5586\n",
"\n",
"Final Node Classifications:\n",
"Predicted: tensor([0, 0, 1, 1])\n",
"Actual: tensor([0, 0, 1, 1])\n"
]
}
]
},
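{
"cell_type": "markdown",
"metadata": {},
"source": [
"*Added note (not part of the original gist):* for inference it is conventional to switch to `model.eval()` and wrap the forward pass in `torch.no_grad()`. This model has no dropout or batch norm, so the predictions are identical to the ones above; the change only skips building the autograd graph."
]
},
{
"cell_type": "code",
"metadata": {},
"execution_count": null,
"outputs": [],
"source": [
"# Inference without gradient tracking (added for clarity, not part of the original gist)\n",
"model.eval()\n",
"with torch.no_grad():\n",
"    logits = model(node_features, adj_matrix)\n",
"    probs = F.softmax(logits, dim=1)   # per-node class probabilities\n",
"print(f\"Probabilities:\\n{probs}\")\n",
"print(f\"Predicted classes: {probs.argmax(dim=1)}\")\n"
]
},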
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "GKn-lXi6ysL4"
},
"execution_count": null,
"outputs": []
}
]
}