729 changes: 729 additions & 0 deletions monarch_remotemount/README.md

Large diffs are not rendered by default.

10 changes: 10 additions & 0 deletions monarch_remotemount/examples/run_apptainer.sh
@@ -0,0 +1,10 @@
#!/bin/bash
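# Create a writable overlay, install packages into it with uv inside an
# isolated container, then verify the install from a second run sharing the
# same overlay. With --network none, the install presumably resolves from a
# package cache already present in the image.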

rm -f /tmp/overlay.img
apptainer overlay create --size 2048 /tmp/overlay.img

rm -rf /tmp/apptainer-work
mkdir /tmp/apptainer-work

apptainer exec --containall --network none \
  --workdir /tmp/apptainer-work --overlay /tmp/overlay.img \
  /tmp/myapp/img.sif \
  uv pip install requests numpy pandas
apptainer exec --containall --network none \
  --workdir /tmp/apptainer-work --overlay /tmp/overlay.img \
  /tmp/myapp/img.sif \
  python -c "import pandas; print(pandas.__version__)"
7 changes: 7 additions & 0 deletions monarch_remotemount/examples/run_cached_pip.sh
@@ -0,0 +1,7 @@
#!/bin/bash
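# Build a fresh venv and install entirely from a local flat directory of
# wheels; --no-index keeps pip off the network.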

set -ex

python -m venv /tmp/myvenv
source /tmp/myvenv/bin/activate
pip install --no-index --find-links /tmp/flat_wheels torch transformers sentencepiece
6 changes: 6 additions & 0 deletions monarch_remotemount/examples/run_disk_large_file.sh
@@ -0,0 +1,6 @@
#!/bin/bash
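# Read one large file twice and print both timings: the first pass measures
# cold-read throughput, the second the warm page cache.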

( time cat /scratch/cpuhrsch/myfiledir/myfile.img > /dev/null ) 2> /tmp/total_time
cat /tmp/total_time
( time cat /scratch/cpuhrsch/myfiledir/myfile.img > /dev/null ) 2> /tmp/total_time
cat /tmp/total_time
8 changes: 8 additions & 0 deletions monarch_remotemount/examples/run_disk_time.sh
@@ -0,0 +1,8 @@
#!/bin/bash
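# Enumerate every file in the venv, shuffle the list deterministically, and
# time reading them all back in that order (approximates non-sequential
# small-file access).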

find /scratch/cpuhrsch/venv -type f > /tmp/all_files
python -c "import random, sys; random.seed(123); lines = sys.stdin.read().splitlines(); random.shuffle(lines); print('\n'.join(lines))" < /tmp/all_files > /tmp/all_files_shuf

( time xargs -d '\n' cat < /tmp/all_files_shuf > /tmp/bigfile ) 2> /tmp/total_time

cat /tmp/total_time
7 changes: 7 additions & 0 deletions monarch_remotemount/examples/run_hf_example.sh
@@ -0,0 +1,7 @@
#!/bin/bash
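# Run the Hugging Face demo with the model cache (HF_HOME) redirected onto
# the scratch volume.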

set -ex

source /scratch/cpuhrsch/venv/bin/activate
cd /scratch/cpuhrsch/venv
HF_HOME=/scratch/cpuhrsch/venv/hf_cache python hf_example.py
7 changes: 7 additions & 0 deletions monarch_remotemount/examples/run_import.sh
@@ -0,0 +1,7 @@
#!/bin/bash
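# Sanity check: activate the venv and run a tiny CUDA op through torch.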

set -ex

source /scratch/cpuhrsch/venv_torch/bin/activate
cd /scratch/cpuhrsch/venv
python -c "import torch; print(torch.randn(123).cuda().mean())"
5 changes: 5 additions & 0 deletions monarch_remotemount/examples/run_import_torch.sh
@@ -0,0 +1,5 @@
#!/bin/bash
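# Time "import torch" twice: the first run shows the cold import cost, the
# second the warm-cache cost.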

source /scratch/cpuhrsch/venv/bin/activate
( time python -c "import torch" ) 2>&1
( time python -c "import torch" ) 2>&1
43 changes: 43 additions & 0 deletions monarch_remotemount/hf_example.py
@@ -0,0 +1,43 @@
import torch
from transformers import pipeline

# 1. Select a sub-10B model
# Microsoft Phi-3 Mini has ~3.8 billion parameters
model_id = "microsoft/Phi-3-mini-4k-instruct"

print(f"Downloading and loading {model_id}...")

# 2. Initialize the pipeline
# device_map="auto" will automatically use your GPU if available, otherwise CPU
# dtype=torch.float16 halves memory usage (float16 generally requires a GPU)
pipe = pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={
        "dtype": torch.float16 if torch.cuda.is_available() else torch.float32,
        "low_cpu_mem_usage": True,
    },
    device_map="auto",
)

# 3. Define your prompt
messages = [
    {"role": "user", "content": "Explain the concept of recursion to a 5-year-old."},
]

# 4. Run the generation
print("\nGenerating response...")
outputs = pipe(
    messages,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
)

# 5. Print the result
generated_text = outputs[0]["generated_text"][-1]["content"]
print("-" * 50)
print(f"Prompt: {messages[0]['content']}")
print("-" * 50)
print(f"Response:\n{generated_text}")
print("-" * 50)
1 change: 1 addition & 0 deletions monarch_remotemount/remotemount/__init__.py
@@ -0,0 +1 @@
from .remotemount import remotemount