
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import os

model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
cache_dir = "/opt/deepseek-env/models"

print(f"Downloading {model_name} to {cache_dir}...")

# exist_ok=True already covers the case where the directory exists,
# so a separate os.path.exists() check is redundant
os.makedirs(cache_dir, exist_ok=True)

try:
    # Fetch the tokenizer and model weights into the local cache directory
    tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        cache_dir=cache_dir,
        torch_dtype=torch.bfloat16,  # load weights in bfloat16 to roughly halve memory use
        device_map="auto",           # let accelerate place layers on available GPUs/CPU
    )
    print("Download complete!")
except Exception as e:
    print(f"Error downloading model: {e}")
