# $Id: robots.txt,v 1.14 2009/09/11 04:17:07 webchick Exp $
#
# robots.txt
#
# This file is used to prevent the crawling and indexing of certain parts
# of your site by the web crawlers and spiders run by search engines such
# as Yahoo! and Google. By telling these "robots" where not to go on your
# site, you save bandwidth and server resources.
#
# This file will be ignored unless it is at the root of your host:
# Used: http://example.com/robots.txt
# Ignored: http://example.com/site/robots.txt
#
# For more information about the robots.txt standard, see:
# http://www.robotstxt.org/wc/robots.html
#
# For syntax checking, see:
# http://www.sxw.org.uk/computing/robots/check.html
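#
# The rules below apply to every crawler (User-agent: *). Each Disallow
# value is matched as a prefix of the requested URL path, so for example
# "Disallow: /admin/" blocks /admin/ and everything beneath it, but not
# /administrator.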
User-agent: *
Crawl-delay: 10
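# Note: Crawl-delay is a non-standard extension with uneven support;
# Bing and Yandex treat the value as seconds to wait between requests,
# while Googlebot ignores the directive altogether.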
# Directories
Disallow: /js/
Disallow: /css/
Disallow: /images/
Disallow: /form/
Disallow: /cgi-bin/
# Files
Disallow: /.htdigest
Disallow: /.htpasswd
Disallow: /favicon.ico
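# Note: robots.txt is advisory and publicly readable, so listing files
# such as /.htpasswd hides them only from well-behaved crawlers; actual
# access control must be enforced by the web server itself.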
# Paths (clean URLs)
Disallow: /user/
Disallow: /admin/
Disallow: /search/
# Paths (no clean URLs)
Disallow: /?q=user/
Disallow: /?q=admin/
Disallow: /?q=search/
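#
# The ?q= rules above repeat the clean-URL paths for sites served without
# URL rewriting; matching is still by prefix, so /?q=user/login is blocked
# while /index.php?q=user/ is not.
#
# One way to sanity-check these rules is Python's standard
# urllib.robotparser (using the example.com host from the header above):
#
#   from urllib.robotparser import RobotFileParser
#
#   rp = RobotFileParser()
#   rp.set_url("http://example.com/robots.txt")
#   rp.read()
#   assert not rp.can_fetch("*", "http://example.com/admin/")       # blocked
#   assert rp.can_fetch("*", "http://example.com/index.html")       # allowed
#   print(rp.crawl_delay("*"))                                      # -> 10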