Fix for issue #179 (#180)

* Fix for issue #179

* additional changes after review | #179

* Further info -> README.md regarding #179 & CONTRIBUTORS.md

* Further info -> README.md regarding #179 & CONTRIBUTORS.md

* Further info -> README.md regarding #179

* Fix for removed "Changes"

* Fix for removed "Changes"
This commit is contained in:
Damian Schwyrz 2020-02-27 14:19:07 +01:00 committed by GitHub
parent 2d165e6179
commit 7ffd74d87e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 74 additions and 8 deletions

View File

@ -2,6 +2,8 @@
- master
- New
- New CLI flag `-maxtime-job` to set max. execution time per job.
- Changed behaviour of `-maxtime`, can now be used for entire process.
- Changed
- v1.0.2

View File

@ -3,6 +3,7 @@
* [bjhulst](https://github.com/bjhulst)
* [ccsplit](https://github.com/ccsplit)
* [codingo](https://github.com/codingo)
* [Damian89](https://github.com/Damian89)
* [delic](https://github.com/delic)
* [eur0pa](https://github.com/eur0pa)
* [fang0654](https://github.com/fang0654)

View File

@ -63,6 +63,22 @@ This is a very straightforward operation, again by using the `FUZZ` keyword. Thi
ffuf -w /path/to/postdata.txt -X POST -d "username=admin\&password=FUZZ" -u https://target/login.php -fc 401
```
### Maximum execution time
If you don't want ffuf to run indefinitely, you can use the `-maxtime` flag. This stops __the entire__ process after a given time (in seconds).
```
ffuf -w /path/to/wordlist -u https://target/FUZZ -maxtime 60
```
When working with recursion, you can control the maxtime __per job__ using `-maxtime-job`. This will stop the current job after a given time (in seconds) and continue with the next one. New jobs are created when the recursion functionality detects a subdirectory.
```
ffuf -w /path/to/wordlist -u https://target/FUZZ -maxtime-job 60 -recursion -recursion-depth 2
```
It is also possible to combine both flags limiting the per job maximum execution time as well as the overall execution time. If you do not use recursion then both flags behave equally.
### Using external mutator to produce test cases
For this example, we'll fuzz JSON data that's sent over POST. [Radamsa](https://gitlab.com/akihe/radamsa) is used as the mutator.
@ -110,7 +126,8 @@ GENERAL OPTIONS:
-ac Automatically calibrate filtering options (default: false)
-acc Custom auto-calibration string. Can be used multiple times. Implies -ac
-c Colorize output. (default: false)
-maxtime Maximum running time in seconds. (default: 0)
-maxtime Maximum running time in seconds for the entire process. (default: 0)
-maxtime-job Maximum running time in seconds per job. (default: 0)
-p Seconds of `delay` between requests, or a range of random delay. For example "0.1" or "0.1-2.0"
-s Do not print additional information (silent mode) (default: false)
-sa Stop on all error cases. Implies -sf and -se. (default: false)

View File

@ -61,7 +61,7 @@ func Usage() {
Description: "",
Flags: make([]UsageFlag, 0),
Hidden: false,
ExpectedFlags: []string{"ac", "acc", "c", "maxtime", "p", "s", "sa", "se", "sf", "t", "v", "V"},
ExpectedFlags: []string{"ac", "acc", "c", "maxtime", "maxtime-job", "p", "s", "sa", "se", "sf", "t", "v", "V"},
}
u_compat := UsageSection{
Name: "COMPATIBILITY OPTIONS",

View File

@ -114,7 +114,8 @@ func main() {
flag.Var(&opts.AutoCalibrationStrings, "acc", "Custom auto-calibration string. Can be used multiple times. Implies -ac")
flag.IntVar(&conf.Threads, "t", 40, "Number of concurrent threads.")
flag.IntVar(&conf.Timeout, "timeout", 10, "HTTP request timeout in seconds.")
flag.IntVar(&conf.MaxTime, "maxtime", 0, "Maximum running time in seconds.")
flag.IntVar(&conf.MaxTime, "maxtime", 0, "Maximum running time in seconds for entire process.")
flag.IntVar(&conf.MaxTimeJob, "maxtime-job", 0, "Maximum running time in seconds per job.")
flag.BoolVar(&conf.Verbose, "v", false, "Verbose output, printing full URL and redirect location (if any) with the results.")
flag.BoolVar(&opts.showVersion, "V", false, "Show version information.")
flag.StringVar(&opts.debugLog, "debug-log", "", "Write all of the internal logging to the specified file.")

View File

@ -39,6 +39,7 @@ type Config struct {
CommandLine string `json:"cmdline"`
Verbose bool `json:"verbose"`
MaxTime int `json:"maxtime"`
MaxTimeJob int `json:"maxtime_job"`
Recursion bool `json:"recursion"`
RecursionDepth int `json:"recursion_depth"`
}
@ -78,6 +79,7 @@ func NewConfig(ctx context.Context) Config {
conf.DirSearchCompat = false
conf.Verbose = false
conf.MaxTime = 0
conf.MaxTimeJob = 0
conf.Recursion = false
conf.RecursionDepth = 0
return conf

View File

@ -24,10 +24,12 @@ type Job struct {
SpuriousErrorCounter int
Total int
Running bool
RunningJob bool
Count403 int
Count429 int
Error string
startTime time.Time
startTimeJob time.Time
queuejobs []QueueJob
queuepos int
currentDepth int
@ -44,6 +46,7 @@ func NewJob(conf *Config) Job {
j.ErrorCounter = 0
j.SpuriousErrorCounter = 0
j.Running = false
j.RunningJob = false
j.queuepos = 0
j.queuejobs = make([]QueueJob, 0)
j.currentDepth = 0
@ -81,12 +84,18 @@ func (j *Job) resetSpuriousErrors() {
//Start the execution of the Job
func (j *Job) Start() {
if j.startTime.IsZero() {
j.startTime = time.Now()
}
// Add the default job to job queue
j.queuejobs = append(j.queuejobs, QueueJob{Url: j.Config.Url, depth: 0})
rand.Seed(time.Now().UnixNano())
j.Total = j.Input.Total()
defer j.Stop()
j.Running = true
j.RunningJob = true
//Show banner if not running in silent mode
if !j.Config.Quiet {
j.Output.Banner()
@ -95,12 +104,14 @@ func (j *Job) Start() {
j.interruptMonitor()
for j.jobsInQueue() {
j.prepareQueueJob()
if j.queuepos > 1 {
if j.queuepos > 1 && !j.RunningJob {
// Print info for queued recursive jobs
j.Output.Info(fmt.Sprintf("Scanning: %s", j.Config.Url))
}
j.Input.Reset()
j.startTime = time.Now()
j.startTimeJob = time.Now()
j.RunningJob = true
j.Counter = 0
j.startExecution()
}
@ -127,13 +138,16 @@ func (j *Job) startExecution() {
go j.runProgress(&wg)
//Limiter blocks after reaching the buffer, ensuring limited concurrency
limiter := make(chan bool, j.Config.Threads)
for j.Input.Next() {
// Check if we should stop the process
j.CheckStop()
if !j.Running {
defer j.Output.Warning(j.Error)
break
}
limiter <- true
nextInput := j.Input.Value()
nextPosition := j.Input.Position()
@ -154,6 +168,11 @@ func (j *Job) startExecution() {
time.Sleep(sleepDurationMS * time.Millisecond)
}
}()
if !j.RunningJob {
defer j.Output.Warning(j.Error)
return
}
}
wg.Wait()
j.updateProgress()
@ -175,20 +194,27 @@ func (j *Job) runProgress(wg *sync.WaitGroup) {
defer wg.Done()
totalProgress := j.Input.Total()
for j.Counter <= totalProgress {
if !j.Running {
break
}
j.updateProgress()
if j.Counter == totalProgress {
return
}
if !j.RunningJob {
return
}
time.Sleep(time.Millisecond * time.Duration(j.Config.ProgressFrequency))
}
}
func (j *Job) updateProgress() {
prog := Progress{
StartedAt: j.startTime,
StartedAt: j.startTimeJob,
ReqCount: j.Counter,
ReqTotal: j.Input.Total(),
QueuePos: j.queuepos,
@ -367,15 +393,26 @@ func (j *Job) CheckStop() {
}
}
// check for maximum running time
// Check for runtime of entire process
if j.Config.MaxTime > 0 {
dur := time.Now().Sub(j.startTime)
runningSecs := int(dur / time.Second)
if runningSecs >= j.Config.MaxTime {
j.Error = "Maximum running time reached, exiting."
j.Error = "Maximum running time for entire process reached, exiting."
j.Stop()
}
}
// Check for runtime of current job
if j.Config.MaxTimeJob > 0 {
dur := time.Now().Sub(j.startTimeJob)
runningSecs := int(dur / time.Second)
if runningSecs >= j.Config.MaxTimeJob {
j.Error = "Maximum running time for this job reached, continuing with next job if one exists."
j.Next()
}
}
}
//Stop the execution of the Job
@ -383,3 +420,9 @@ func (j *Job) Stop() {
j.Running = false
return
}
// Next stops the currently running job by clearing RunningJob, letting the
// queue loop in Start() continue with the next queued job (if any exists).
// Unlike Stop(), it leaves Running untouched, so the overall process keeps going.
func (j *Job) Next() {
	j.RunningJob = false
	return
}