Dirscan: A Python-Based Directory Brute-Forcing Tool

Published on 2021-08-09



Preface:

My development skills have always been fairly weak. I've had some free time recently, so it's time to practice. Being bad is no excuse; if you're bad, go learn!

Directory structure
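Judging from the imports and file paths used in the code below, the project is laid out roughly like this (the wordlist and output file names are the defaults from the argument parser):

dirscan/
├── dirscan.py        # entry point and command-line parsing
├── lib/
│   ├── init.py       # Init: normalizes the URL and builds the URL list
│   └── scan.py       # Scan: sends the requests and records results
├── dict/
│   └── default.txt   # wordlist used for brute forcing
└── output/
    └── default.txt   # scan results are appended here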

The code

# dirscan.py
import argparse
from lib.init import Init


def main():
    parser = argparse.ArgumentParser(description="A simple directory scanner, Author's QQ:1779176323")
    parser.add_argument("url", help="The website to be scanned", type=str)
    parser.add_argument("-d", "--dictionary", help="Select the dictionary name", default="default.txt")
    parser.add_argument("-o", "--output", help="Name of the scan result file", default="default.txt")
    parser.add_argument("-t", "--threads", help="Number of threads for brute forcing directories", default=5, type=int)
    args = parser.parse_args()

    scan = Init(args)
    scan.start_scan()

if __name__ == '__main__':
    main()
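For reference, typical invocations look like the following (the target host and file names are placeholders; note that -t is parsed but, as mentioned in the shortcomings below, not used yet):

# Example usage (hypothetical target and file names)
python dirscan.py example.com
python dirscan.py http://example.com -d common.txt -o result.txt -t 10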
# init.py
import sys
from .scan import Scan


class Init:
    def __init__(self, args):
        self.url = self.init_url(args.url)
        self.dictionary = args.dictionary
        self.output = args.output
        self.threads = args.threads

    def init_url(self, url):
        '''
        Normalize the URL into a standard format.
        '''
        if not url.startswith('http'):
            url = 'http://' + url

        if not url.endswith('/'):
            url = url + '/'

        return url

    def get_file(self):
        datalist = list()
        # read the wordlist file line by line
        with open("dict/" + self.dictionary, "r", encoding="utf-8") as f:
            for line in f:
                datalist.append(line.strip("\n"))

        return datalist

    def get_path(self, datalist):
        urllist = list()
        for data in datalist:
            urllist.append(self.url + data)
        return urllist

    def start_scan(self):
        datalist = self.get_file()         # load the wordlist entries
        urllist = self.get_path(datalist)  # build the full URLs to probe

        scan = Scan(urllist, self.threads, self.output)
        scan.get_url()
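As a quick sanity check of the URL handling, a minimal sketch (example.com, the paths, and the Namespace stand-in for real command-line arguments are all placeholders):

# Hypothetical check of Init's URL normalization and path expansion
from argparse import Namespace
from lib.init import Init

args = Namespace(url="example.com", dictionary="default.txt",
                 output="default.txt", threads=5)
init = Init(args)
print(init.url)                               # http://example.com/
print(init.get_path(["admin", "login.php"]))
# ['http://example.com/admin', 'http://example.com/login.php']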

# scan.py
import requests

class Scan:
    def __init__(self, urllist, threads, output):
        self.urllist = urllist
        self.threads = threads
        self.output = output

    def get_url(self):
        for url in self.urllist:
            response = requests.get(url)

            print("scanning %s" % url)

            # log hits; for now both 200 and 404 responses are recorded
            if response.status_code == 200 or response.status_code == 404:
                print("[%d] => %s" % (response.status_code, url))

                with open("output/" + self.output, "a") as f:
                    f.write("[%d] => %s\n" % (response.status_code, url))

Current shortcomings:

  1. Scanning is still single-threaded; the thread-count option is parsed but not used yet (see the sketch after this list).
  2. No delay between requests has been implemented.
  3. The result filtering is not smart enough; I will work on these issues next.
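As one possible direction for the first two items (a sketch under my own assumptions, not the final implementation), Scan could hand each URL to a concurrent.futures.ThreadPoolExecutor worker and sleep briefly before every request; the delay parameter, its 0.1 s default, and the check_url helper are illustrative names added here:

# scan.py -- a possible threaded rewrite (sketch only)
import threading
import time
from concurrent.futures import ThreadPoolExecutor

import requests


class Scan:
    def __init__(self, urllist, threads, output, delay=0.1):
        self.urllist = urllist
        self.threads = threads
        self.output = output
        self.delay = delay                 # per-request delay in seconds
        self.lock = threading.Lock()       # serialize writes to the output file

    def check_url(self, url):
        # one worker task: wait, request, then log interesting status codes
        time.sleep(self.delay)
        response = requests.get(url)
        if response.status_code == 200 or response.status_code == 404:
            print("[%d] => %s" % (response.status_code, url))
            with self.lock:
                with open("output/" + self.output, "a") as f:
                    f.write("[%d] => %s\n" % (response.status_code, url))

    def get_url(self):
        # fan the URL list out across self.threads worker threads
        with ThreadPoolExecutor(max_workers=self.threads) as executor:
            executor.map(self.check_url, self.urllist)

If this approach were adopted, the extra delay argument would also need to be passed through dirscan.py and Init.start_scan.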