# Robots.txt for ThinkCrypt - Software Development Company
# Website: https://thinkcrypt.dev

# Allow all search engines to crawl the site
User-agent: *
Allow: /

# Block access to admin and private areas
Disallow: /admin/
Disallow: /api/
Disallow: /_next/
Disallow: /private/
Disallow: /.well-known/

# Block crawling of search and filter pages to avoid duplicate content
Disallow: /search
Disallow: /*?category=*
Disallow: /*?filter=*
Disallow: /*?sort=*
Disallow: /*?page=*

# Allow crawling of specific important paths
Allow: /api/portfolio$
Allow: /api/blog$

# Crawl delay (optional; reduces server load)
Crawl-delay: 1

# Sitemap location
Sitemap: https://thinkcrypt.dev/sitemap.xml

# Specific rules for major search engines
User-agent: Googlebot
Allow: /
Crawl-delay: 0

User-agent: Bingbot
Allow: /
Crawl-delay: 1

User-agent: facebookexternalhit
Allow: /

User-agent: Twitterbot
Allow: /

User-agent: LinkedInBot
Allow: /

# Block SEO/backlink crawlers that add server load without bringing search traffic
User-agent: AhrefsBot
Disallow: /

User-agent: MJ12bot
Disallow: /

User-agent: DotBot
Disallow: /
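
# Notes (comment lines are ignored by crawlers):
# - Rule precedence is longest-match, so "Allow: /api/portfolio$" and
#   "Allow: /api/blog$" override "Disallow: /api/" for those exact URLs;
#   the trailing "$" anchors the match to the end of the URL.
# - Googlebot does not support the Crawl-delay directive; Bingbot and
#   some other crawlers do honor it.
# - Crawlers follow only the most specific matching User-agent group, so
#   the Googlebot, Bingbot, and social-bot groups above take the place of
#   the "User-agent: *" rules for those bots. If the Disallow rules
#   should also apply to them, repeat those rules inside each group.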