
Efficiency Hacks

Maximize your productivity with Claude Code through clever shortcuts, automation tricks, and time-saving patterns. This guide reveals power-user techniques that can dramatically speed up your development workflow.

Instant Commands

# Create aliases for common operations
alias ccode='claude'
alias ccreview='claude review'
alias cctest='claude "Generate tests for"'
alias ccdoc='claude "Document"'
alias ccfix='claude "Fix the error:"'
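
With the aliases loaded (source your shell rc first), common requests shrink to a few keystrokes. The file paths below are illustrative:

ccreview src/auth.js                 # expands to: claude review src/auth.js
cctest src/auth.js                   # expands to: claude "Generate tests for" src/auth.js
ccfix "TypeError: x is undefined"    # paste an error message straight in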

Template Library

# Build reusable prompt templates
mkdir -p ~/.claude-templates
echo 'Refactor for performance' > ~/.claude-templates/perf
echo 'Add error handling' > ~/.claude-templates/errors
echo 'Generate REST API' > ~/.claude-templates/api
# Use templates
ccode "$(cat ~/.claude-templates/perf) $1"
  1. Parallel file processing

    # Process multiple files in parallel
    find . -name "*.js" -type f | \
    parallel -j 4 "claude 'Add JSDoc comments to {}' --edit"
    # Batch refactoring with progress
    ls src/**/*.ts | pv -l | \
    xargs -P 4 -I {} claude "Modernize TypeScript in {}" --edit
  2. Smart file selection

    # Process only changed files
    git diff --name-only main | \
    grep -E '\.(js|ts)$' | \
    xargs -I {} claude "Review and improve {}"
    # Focus on complex files
    find . -name "*.js" -exec wc -l {} + | \
    sort -rn | head -20 | awk '{print $2}' | \
    xargs -I {} claude "Simplify complex code in {}"
  3. Conditional processing

    # Only process files with TODOs
    grep -l "TODO" src/**/*.js | \
    xargs -I {} claude "Implement TODOs in {}" --edit
    # Fix files with linting errors
    eslint . --format json | \
    jq -r '.[] | select(.errorCount > 0) | .filePath' | \
    xargs -I {} claude "Fix ESLint errors in {}" --edit
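
These filters also compose. As a sketch, here is how to send only branch-changed files that currently fail ESLint (eslint exits non-zero when it finds errors):

# Changed relative to main AND failing lint
git diff --name-only main | grep -E '\.(js|ts)$' | while read -r f; do
  eslint --quiet "$f" >/dev/null 2>&1 || claude "Fix ESLint errors in $f" --edit
done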

Power Functions

# Add to ~/.bashrc or ~/.zshrc

# Quick code review for current branch
ccreview-branch() {
  local base="${1:-main}"
  git diff "$base"...HEAD --name-only | \
    xargs -I {} claude review {} --format summary
}

# Generate tests for modified functions
cctest-changed() {
  git diff --name-only | \
    xargs -I {} sh -c \
      'claude "Generate tests for changed functions in $1" --output "${1%.*}.test.${1##*.}"' _ {}
}

# Instant documentation
ccdoc-func() {
  local file="$1"
  local line="$2"
  claude "Document the function at line $line in $file" --edit
}

# Fix and format
ccfix-format() {
  claude "Fix errors and format code in $1" --edit && \
    prettier --write "$1" && \
    eslint --fix "$1"
}

# Explain complex code
ccexplain() {
  local file="$1"
  local start="$2"
  local end="${3:-$((start+20))}"
  sed -n "${start},${end}p" "$file" | \
    claude "Explain this code in simple terms"
}

# Performance analysis
ccperf() {
  claude "Analyze performance bottlenecks in $1 and suggest optimizations"
}

# Security audit
ccsecurity() {
  claude "Audit $1 for security vulnerabilities and suggest fixes"
}

VS Code Snippets

.vscode/claude.code-snippets
{
  "Claude Review": {
    "prefix": "ccrev",
    "body": [
      "!claude review ${TM_FILENAME} --focus '${1:security,performance}'"
    ],
    "description": "Review current file with Claude"
  },
  "Claude Test": {
    "prefix": "cctest",
    "body": [
      "!claude 'Generate comprehensive tests for ${TM_FILENAME}' --output ${TM_FILENAME_BASE}.test.${TM_FILENAME_EXT}"
    ],
    "description": "Generate tests for current file"
  },
  "Claude Refactor": {
    "prefix": "ccref",
    "body": [
      "!claude 'Refactor the ${1:function} in ${TM_FILENAME} for ${2:clarity and performance}' --edit"
    ],
    "description": "Refactor with Claude"
  },
  "Claude Implement": {
    "prefix": "ccimp",
    "body": [
      "// TODO: Implement ${1:feature}",
      "!claude 'Implement the TODO above in ${TM_FILENAME}' --edit"
    ],
    "description": "Implement TODO with Claude"
  }
}

VS Code Keybindings

keybindings.json
[
  {
    "key": "ctrl+alt+r",
    "command": "workbench.action.terminal.sendSequence",
    "args": {
      "text": "claude review '${file}' --format summary\n"
    }
  },
  {
    "key": "ctrl+alt+t",
    "command": "workbench.action.terminal.sendSequence",
    "args": {
      "text": "claude 'Generate tests for ${file}' --output '${fileDirname}/__tests__/${fileBasename}'\n"
    }
  },
  {
    "key": "ctrl+alt+f",
    "command": "workbench.action.terminal.sendSequence",
    "args": {
      "text": "claude 'Fix linting errors in ${file}' --edit\n"
    }
  },
  {
    "key": "ctrl+alt+d",
    "command": "workbench.action.terminal.sendSequence",
    "args": {
      "text": "claude 'Add comprehensive documentation to ${file}' --edit\n"
    }
  }
]
  1. Morning startup script

    morning-startup.sh
    #!/bin/bash
    echo "🌅 Good morning! Starting daily workflow..."
    # Update dependencies and check for issues
    echo "📦 Checking dependencies..."
    npm outdated | claude "Analyze outdated packages and suggest update strategy"
    # Review yesterday's TODOs
    echo "📝 Reviewing TODOs..."
    git log --since="yesterday" --grep="TODO" --oneline | \
    claude "Summarize yesterday's TODOs and suggest priorities"
    # Check for overnight CI failures
    echo "🔍 Checking CI status..."
    gh run list --limit 10 --json conclusion,name | \
    claude "Analyze CI failures and suggest fixes"
    # Generate daily plan
    echo "📅 Creating daily plan..."
    claude "Based on the above information, create a prioritized task list for today"
  2. Code quality monitor

    quality-monitor.sh
    #!/bin/bash
    # Run in background throughout the day
    while true; do
      # Check for code smells in recently modified files
      git diff --name-only HEAD~5..HEAD | \
        xargs -I {} claude "Quick check for code smells in {}" \
          --format json > quality-report.json
      # Alert if issues found
      if jq -e '.issues | length > 0' quality-report.json > /dev/null; then
        notify-send "Code Quality Alert" "Issues found in recent changes"
      fi
      sleep 3600 # Check every hour
    done
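
To keep the monitor alive without a dedicated terminal, one option is a crontab entry; the script path is illustrative, and any process supervisor works just as well:

# crontab -e: launch the monitor at boot, logging to /tmp
@reboot $HOME/scripts/quality-monitor.sh >> /tmp/quality-monitor.log 2>&1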

Intelligent Git Hooks

.git/hooks/prepare-commit-msg
#!/bin/bash
# Auto-generate commit messages
COMMIT_MSG_FILE=$1
COMMIT_SOURCE=$2
if [ -z "$COMMIT_SOURCE" ]; then
  # Get staged changes
  CHANGES=$(git diff --cached --name-status)
  # Generate commit message
  GENERATED_MSG=$(echo "$CHANGES" | \
    claude "Generate a conventional commit message for these changes. Be concise.")
  # Prepend to commit message file
  echo "$GENERATED_MSG" > "$COMMIT_MSG_FILE.tmp"
  echo "" >> "$COMMIT_MSG_FILE.tmp"
  echo "# Generated by Claude Code. Edit as needed." >> "$COMMIT_MSG_FILE.tmp"
  cat "$COMMIT_MSG_FILE" >> "$COMMIT_MSG_FILE.tmp"
  mv "$COMMIT_MSG_FILE.tmp" "$COMMIT_MSG_FILE"
fi

.git/hooks/post-merge
#!/bin/bash
# Auto-handle merge conflicts
if [ -f .git/MERGE_HEAD ]; then
  # Find files with conflicts
  CONFLICTED=$(git diff --name-only --diff-filter=U)
  if [ -n "$CONFLICTED" ]; then
    echo "🤖 Claude Code is analyzing merge conflicts..."
    for file in $CONFLICTED; do
      claude "Resolve merge conflicts in $file intelligently, preserving both changes where possible" \
        --edit
    done
    echo "✅ Conflicts resolved. Please review the changes."
  fi
fi
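
Git only runs hooks that have the execute bit set, so after creating both files:

chmod +x .git/hooks/prepare-commit-msg .git/hooks/post-merge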

Relevant File Finder

find-context.py
#!/usr/bin/env python3
import os
import ast
import sys
import networkx as nx
from pathlib import Path

def find_file(module_name, root_dir):
    """Resolve a module name to a file path (simple heuristic)."""
    candidate = Path(root_dir) / (module_name.replace(".", os.sep) + ".py")
    return str(candidate) if candidate.exists() else None

def build_dependency_graph(root_dir):
    """Build a graph of file dependencies"""
    graph = nx.DiGraph()
    for path in Path(root_dir).rglob("*.py"):
        graph.add_node(str(path))
        try:
            with open(path) as f:
                tree = ast.parse(f.read())
            for node in ast.walk(tree):
                if isinstance(node, ast.Import):
                    for alias in node.names:
                        dep_path = find_file(alias.name, root_dir)
                        if dep_path:
                            graph.add_edge(str(path), dep_path)
        except (SyntaxError, UnicodeDecodeError):
            pass
    return graph

def find_related_files(target_file, graph, max_depth=2):
    """Find files related to target within max_depth"""
    related = set()
    # Find dependencies and dependents
    if target_file in graph:
        # Direct dependencies
        related.update(graph.successors(target_file))
        # Direct dependents
        related.update(graph.predecessors(target_file))
        # Nth-degree relationships
        for _ in range(max_depth - 1):
            new_related = set()
            for file in related:
                new_related.update(graph.successors(file))
                new_related.update(graph.predecessors(file))
            related.update(new_related)
    return list(related)[:10]  # Limit to 10 most relevant

# Use in Claude Code
if __name__ == "__main__":
    target = sys.argv[1]
    graph = build_dependency_graph(".")
    related = find_related_files(target, graph)
    # Build Claude command with context
    cmd = f"claude 'Analyze {target}' --context {target}"
    for file in related:
        cmd += f" --context {file}"
    print(cmd)
    os.system(cmd)
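
Usage is a single invocation; the target path is illustrative:

# Analyze a file together with up to 10 related modules as context
python3 find-context.py src/services/payment.py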

Semantic Chunks

chunk-context.sh
#!/bin/bash
# Split large files into semantic chunks
chunk_file() {
  local file="$1"
  local chunk_size=50
  # Extract functions/classes as chunks
  grep -n "^def \|^class " "$file" | while read -r line; do
    line_num=$(echo "$line" | cut -d: -f1)
    func_name=$(echo "$line" | cut -d' ' -f2)
    # Extract function with a few lines of leading context
    start=$(( line_num > 5 ? line_num - 5 : 1 ))
    sed -n "${start},$((line_num+chunk_size))p" "$file" > "/tmp/chunk_${func_name}.py"
  done
}

# Use chunks for focused analysis
analyze_with_chunks() {
  local file="$1"
  local query="$2"
  chunk_file "$file"
  # Find relevant chunks
  relevant_chunks=$(ls /tmp/chunk_*.py | \
    xargs -I {} sh -c "grep -l '$query' {} || true")
  if [ -n "$relevant_chunks" ]; then
    claude "Analyze regarding '$query'" \
      $(echo $relevant_chunks | xargs -n1 echo "--context")
  else
    claude "Analyze $file regarding '$query'"
  fi
  # Cleanup
  rm -f /tmp/chunk_*.py
}
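
A typical call, assuming a large Python module (both arguments are illustrative):

# Send only the chunks that mention the topic, not the whole 3,000-line file
analyze_with_chunks src/server.py "error handling"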
  1. Compress prompts

    prompt-compressor.py
    import re
    import tiktoken

    class PromptCompressor:
        def __init__(self):
            self.encoder = tiktoken.get_encoding("cl100k_base")

        def compress(self, prompt):
            """Compress prompt while preserving meaning"""
            # Remove excessive whitespace
            prompt = re.sub(r'\s+', ' ', prompt)
            # Use abbreviations
            abbreviations = {
                'function': 'func',
                'variable': 'var',
                'parameter': 'param',
                'return': 'ret',
                'implement': 'impl',
                'documentation': 'docs',
                'configuration': 'config'
            }
            for long, short in abbreviations.items():
                prompt = prompt.replace(long, short)
            # Remove filler words
            fillers = ['please', 'could you', 'I would like', 'can you']
            for filler in fillers:
                prompt = prompt.replace(filler, '')
            return prompt.strip()

        def estimate_tokens(self, text):
            """Estimate token count"""
            return len(self.encoder.encode(text))

        def extract_important(self, content):
            """Keep signature-level lines (simple placeholder heuristic)"""
            keep = ('def ', 'class ', 'import ', 'from ')
            return '\n'.join(l for l in content.splitlines()
                             if l.lstrip().startswith(keep))

        def summarize(self, content):
            """Fall back to the first few lines (simple placeholder)"""
            return '\n'.join(content.splitlines()[:10])

        def optimize_context(self, files, max_tokens=3000):
            """Optimize file context to fit token limit"""
            context_parts = []
            current_tokens = 0
            for file_path, content in files.items():
                # Get important parts
                important = self.extract_important(content)
                tokens = self.estimate_tokens(important)
                if current_tokens + tokens <= max_tokens:
                    context_parts.append(f"# {file_path}\n{important}")
                    current_tokens += tokens
                else:
                    # Add summary only
                    summary = self.summarize(content)
                    context_parts.append(f"# {file_path} (summary)\n{summary}")
                    current_tokens += self.estimate_tokens(summary)
            return "\n\n".join(context_parts)
  2. Response caching

    cache-responses.sh
    # Create cache directory
    CACHE_DIR="$HOME/.claude-cache"
    mkdir -p "$CACHE_DIR"

    # Cached Claude Code function
    ccache() {
      local prompt="$1"
      shift
      # Generate cache key
      cache_key=$(echo "$prompt $*" | sha256sum | cut -d' ' -f1)
      cache_file="$CACHE_DIR/$cache_key"
      # Check cache
      if [ -f "$cache_file" ] && [ -z "$CLAUDE_NO_CACHE" ]; then
        echo "📦 Using cached response..."
        cat "$cache_file"
      else
        # Run Claude Code and cache result
        claude "$prompt" "$@" | tee "$cache_file"
      fi
    }

    # Cache cleanup (run daily)
    cleanup_cache() {
      find "$CACHE_DIR" -type f -mtime +7 -delete
      echo "🧹 Cleaned old cache entries"
    }
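
A sketch of day-to-day use (the prompt and schedule are illustrative):

# First call hits the API; an identical repeat is served from cache
ccache "Summarize the build pipeline" --context Makefile
# Bypass the cache for a fresh answer
CLAUDE_NO_CACHE=1 ccache "Summarize the build pipeline" --context Makefile
# crontab -e: prune week-old entries every night at 3 am
0 3 * * * find "$HOME/.claude-cache" -type f -mtime +7 -delete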

Pipeline Processing

pipeline-processor.sh
#!/bin/bash
# Process code through multiple stages
process_pipeline() {
  local input_file="$1"
  local temp_dir=$(mktemp -d)

  # Stage 1: Analysis
  echo "🔍 Stage 1: Analysis"
  claude "Analyze $input_file for issues" \
    --output "$temp_dir/analysis.md"

  # Stage 2: Planning
  echo "📋 Stage 2: Planning"
  claude "Based on the analysis, create refactoring plan" \
    --input "$temp_dir/analysis.md" \
    --output "$temp_dir/plan.md"

  # Stage 3: Implementation
  echo "🔨 Stage 3: Implementation"
  claude "Implement the refactoring plan" \
    --input "$temp_dir/plan.md" \
    --context "$input_file" \
    --output "$temp_dir/refactored.js"

  # Stage 4: Verification
  echo "✅ Stage 4: Verification"
  claude "Compare original and refactored versions, ensure functionality preserved" \
    --context "$input_file" \
    --context "$temp_dir/refactored.js" \
    --output "$temp_dir/verification.md"

  # Show results
  echo "📊 Pipeline Results:"
  cat "$temp_dir/verification.md"

  # Optional: Apply changes
  read -p "Apply changes? (y/n) " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    cp "$temp_dir/refactored.js" "$input_file"
    echo "✅ Changes applied"
  fi

  # Cleanup
  rm -rf "$temp_dir"
}
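
Running it is one call; the file path is illustrative:

# Walk one file through analyze → plan → implement → verify
process_pipeline src/legacy/parser.js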

Smart Grouping

batch-optimizer.py
import os
import subprocess
from collections import defaultdict

def group_files_by_pattern(files):
    """Group files by similar patterns"""
    groups = defaultdict(list)
    for file in files:
        # Group by directory
        dir_key = os.path.dirname(file)
        groups[f"dir:{dir_key}"].append(file)
        # Group by file type
        ext = os.path.splitext(file)[1]
        groups[f"ext:{ext}"].append(file)
        # Group by prefix
        prefix = os.path.basename(file).split('_')[0]
        groups[f"prefix:{prefix}"].append(file)
    return groups

def batch_process_groups(groups, command_template):
    """Process groups efficiently"""
    for group_key, files in groups.items():
        if len(files) > 1:
            # Batch similar files
            print(f"Processing {group_key} ({len(files)} files)")
            file_list = ' '.join(files)
            cmd = command_template.format(files=file_list)
            subprocess.run(cmd, shell=True)
        else:
            # Process single file
            subprocess.run(
                command_template.format(files=files[0]),
                shell=True
            )
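
A sketch of driving it, assuming the module above is saved as batch_optimizer.py:

python3 - <<'PY'
import glob
from batch_optimizer import group_files_by_pattern, batch_process_groups

files = glob.glob("src/**/*.py", recursive=True)
groups = group_files_by_pattern(files)
# {files} in the template is replaced with the whole group at once
batch_process_groups(groups, "claude 'Add type hints to: {files}' --edit")
PY

Note that each file lands in several groups (directory, extension, prefix), so deduplicate first if repeat processing is a concern.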

Priority Queue

priority-processor.py
import heapq
import json
from datetime import datetime

class PriorityProcessor:
    def __init__(self):
        self.queue = []

    def add_task(self, priority, task):
        # Lower number = higher priority; timestamp breaks ties (FIFO)
        heapq.heappush(self.queue, (priority, datetime.now(), task))

    def process_next(self):
        if self.queue:
            priority, timestamp, task = heapq.heappop(self.queue)
            return task
        return None

    def batch_add_from_analysis(self, analysis_file):
        """Add tasks based on code analysis"""
        with open(analysis_file) as f:
            analysis = json.load(f)
        for issue in analysis['issues']:
            if issue['severity'] == 'error':
                priority = 1
            elif issue['severity'] == 'warning':
                priority = 5
            else:
                priority = 10
            task = {
                'file': issue['file'],
                'line': issue['line'],
                'fix': issue['suggested_fix'],
                'command': "claude 'Fix: {}' --edit".format(issue['message'])
            }
            self.add_task(priority, task)
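
And a sketch of draining the queue, assuming the class is saved as priority_processor.py and an analysis JSON with the fields used above:

python3 - <<'PY'
import subprocess
from priority_processor import PriorityProcessor

proc = PriorityProcessor()
proc.batch_add_from_analysis("analysis.json")  # errors first, then warnings
task = proc.process_next()
while task is not None:
    print(f"Fixing {task['file']}:{task['line']}")
    subprocess.run(task['command'], shell=True)
    task = proc.process_next()
PY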

Personal Productivity Dashboard

claude-analytics.py
#!/usr/bin/env python3
import os
import sys
import time
import sqlite3
from collections import defaultdict
from datetime import datetime, timedelta
import matplotlib.pyplot as plt

class ClaudeAnalytics:
    def __init__(self, db_path="~/.claude-analytics.db"):
        self.conn = sqlite3.connect(os.path.expanduser(db_path))
        self.init_db()

    def init_db(self):
        self.conn.execute("""
            CREATE TABLE IF NOT EXISTS usage (
                id INTEGER PRIMARY KEY,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
                command TEXT,
                tokens_used INTEGER,
                execution_time REAL,
                file_type TEXT,
                operation_type TEXT
            )
        """)
        self.conn.commit()

    def log_usage(self, command, tokens, exec_time):
        """Log Claude Code usage"""
        # Extract operation type
        op_type = 'other'
        if 'review' in command:
            op_type = 'review'
        elif 'test' in command:
            op_type = 'test'
        elif 'refactor' in command:
            op_type = 'refactor'
        elif 'document' in command:
            op_type = 'documentation'
        self.conn.execute("""
            INSERT INTO usage (command, tokens_used, execution_time, operation_type)
            VALUES (?, ?, ?, ?)
        """, (command, tokens, exec_time, op_type))
        self.conn.commit()

    def generate_report(self):
        """Generate productivity report"""
        # Last 7 days usage
        week_ago = datetime.now() - timedelta(days=7)
        cursor = self.conn.execute("""
            SELECT
                DATE(timestamp) as date,
                COUNT(*) as operations,
                SUM(tokens_used) as total_tokens,
                AVG(execution_time) as avg_time,
                operation_type
            FROM usage
            WHERE timestamp > ?
            GROUP BY DATE(timestamp), operation_type
            ORDER BY date DESC
        """, (week_ago,))
        data = cursor.fetchall()
        # Create visualizations
        self.plot_usage_trends(data)
        self.plot_operation_distribution(data)
        # Calculate insights
        total_ops = sum(row[1] for row in data)
        total_tokens = sum(row[2] or 0 for row in data)
        time_saved = total_ops * 10  # Assume 10 min saved per operation
        print(f"""
📊 Claude Code Productivity Report
=================================
Last 7 Days:
- Total Operations: {total_ops}
- Tokens Used: {total_tokens:,}
- Estimated Time Saved: {time_saved} minutes
- Most Common Operation: {self.most_common_op(data)}
Daily Average:
- Operations: {total_ops / 7:.1f}
- Tokens: {total_tokens / 7:,.0f}
💡 Productivity Tips:
{self.generate_tips(data)}
""")

    def most_common_op(self, data):
        """Operation type with the most recorded runs"""
        op_counts = defaultdict(int)
        for row in data:
            op_counts[row[4]] += row[1]
        return max(op_counts, key=op_counts.get) if op_counts else 'n/a'

    def generate_tips(self, data):
        """Generate personalized productivity tips"""
        tips = []
        # Analyze patterns
        total_ops = sum(row[1] for row in data)
        op_counts = defaultdict(int)
        for row in data:
            op_counts[row[4]] += row[1]
        if op_counts['review'] < op_counts['test']:
            tips.append("- Consider more code reviews to catch issues early")
        if op_counts['documentation'] < total_ops * 0.1:
            tips.append("- Increase documentation generation for better maintainability")
        return '\n'.join(tips) if tips else "- Keep up the great work!"

    def plot_usage_trends(self, data):
        """Chart operations per day (simple placeholder visualization)"""
        daily = defaultdict(int)
        for row in data:
            daily[row[0]] += row[1]
        if daily:
            plt.bar(list(daily.keys()), list(daily.values()))
            plt.title("Claude Code operations per day")
            plt.savefig("usage-trends.png")
            plt.close()

    def plot_operation_distribution(self, data):
        """Chart operations by type (simple placeholder visualization)"""
        op_counts = defaultdict(int)
        for row in data:
            op_counts[row[4]] += row[1]
        if op_counts:
            plt.pie(list(op_counts.values()), labels=list(op_counts.keys()))
            plt.title("Operations by type")
            plt.savefig("operation-distribution.png")
            plt.close()

def estimate_tokens(text):
    """Very rough token estimate (placeholder: ~4 characters per token)"""
    return max(1, len(text) // 4)

# Wrapper for Claude Code with analytics
def claude_with_analytics():
    analytics = ClaudeAnalytics()
    start_time = time.time()
    # Run actual command
    os.system(f"claude {' '.join(sys.argv[1:])}")
    # Log usage
    exec_time = time.time() - start_time
    command = ' '.join(sys.argv[1:])
    tokens = estimate_tokens(command)
    analytics.log_usage(command, tokens, exec_time)

if __name__ == "__main__":
    if "--report" in sys.argv:
        ClaudeAnalytics().generate_report()
    else:
        claude_with_analytics()
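
One way to wire the wrapper in (the install path and alias name are illustrative):

# Log every run by routing calls through the wrapper
alias claude-logged='python3 ~/bin/claude-analytics.py'
claude-logged review src/app.js
# Weekly summary plus usage-trends.png / operation-distribution.png
python3 ~/bin/claude-analytics.py --report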

Essential Shortcuts

# Most useful aliases
alias cc='claude'
alias ccr='claude review'
alias cct='claude "Generate tests for"'
alias ccd='claude "Document"'
alias ccf='claude "Fix"'
# Quick functions
ccfunc() { claude "Explain the function at line $2 in $1"; }
ccopt() { claude "Optimize $1 for performance"; }
ccsec() { claude "Security audit $1"; }
# Batch operations
ccall() { find . -name "*.js" | xargs -P 4 -I {} claude "$1 {}" --edit; }
ccchanged() { git diff --name-only | xargs -I {} claude "$1 {}"; }
# Context helpers
ccwith() { claude "$1" --context "$2" --context "$3"; }
ccproject() { claude "$1" --context "$(find . -name '*.md' | head -5)"; }

Continue mastering Claude Code with the rest of this series.

Remember: The best efficiency hack is understanding your own workflow. Track what you do most often and optimize those specific tasks first. Small time savings on frequent operations add up to massive productivity gains.