mirror of https://github.com/curl/curl.git
synced 2025-09-09 13:49:44 +03:00
tool_operate: make retrycheck() a separate function
Simplifies post_per_transfer()

Closes #17381
parent c26da713e7
commit dd22442e3b
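In short, the retry decision logic that previously lived inline in post_per_transfer() moves into the new static helper retrycheck(), which reports its verdict through the bool *retryp and long *delayms output parameters and returns a CURLcode only for hard failures (such as a failed truncate of the output file). A minimal sketch of the resulting call pattern, lifted from the second hunk below; retryp and delay are parameters that post_per_transfer() already receives and simply passes through:

    result = retrycheck(config, per, result, retryp, delay);
    if(!result && *retryp)
      return CURLE_OK; /* retry! *delay holds the wait in milliseconds */

So post_per_transfer() only signals a retry when the helper itself succeeded and set *retryp.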
@@ -361,6 +361,198 @@ void single_transfer_cleanup(struct OperationConfig *config)
   }
 }
 
+static CURLcode retrycheck(struct OperationConfig *config,
+                           struct per_transfer *per,
+                           CURLcode result,
+                           bool *retryp,
+                           long *delayms)
+{
+  CURL *curl = per->curl;
+  struct OutStruct *outs = &per->outs;
+  enum {
+    RETRY_NO,
+    RETRY_ALL_ERRORS,
+    RETRY_TIMEOUT,
+    RETRY_CONNREFUSED,
+    RETRY_HTTP,
+    RETRY_FTP,
+    RETRY_LAST /* not used */
+  } retry = RETRY_NO;
+  long response = 0;
+  if((CURLE_OPERATION_TIMEDOUT == result) ||
+     (CURLE_COULDNT_RESOLVE_HOST == result) ||
+     (CURLE_COULDNT_RESOLVE_PROXY == result) ||
+     (CURLE_FTP_ACCEPT_TIMEOUT == result))
+    /* retry timeout always */
+    retry = RETRY_TIMEOUT;
+  else if(config->retry_connrefused &&
+          (CURLE_COULDNT_CONNECT == result)) {
+    long oserrno = 0;
+    curl_easy_getinfo(curl, CURLINFO_OS_ERRNO, &oserrno);
+    if(SOCKECONNREFUSED == oserrno)
+      retry = RETRY_CONNREFUSED;
+  }
+  else if((CURLE_OK == result) ||
+          ((config->failonerror || config->failwithbody) &&
+           (CURLE_HTTP_RETURNED_ERROR == result))) {
+    /* If it returned OK. _or_ failonerror was enabled and it
+       returned due to such an error, check for HTTP transient
+       errors to retry on. */
+    const char *scheme;
+    curl_easy_getinfo(curl, CURLINFO_SCHEME, &scheme);
+    scheme = proto_token(scheme);
+    if(scheme == proto_http || scheme == proto_https) {
+      /* This was HTTP(S) */
+      curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
+
+      switch(response) {
+      case 408: /* Request Timeout */
+      case 429: /* Too Many Requests (RFC6585) */
+      case 500: /* Internal Server Error */
+      case 502: /* Bad Gateway */
+      case 503: /* Service Unavailable */
+      case 504: /* Gateway Timeout */
+        retry = RETRY_HTTP;
+        /*
+         * At this point, we have already written data to the output
+         * file (or terminal). If we write to a file, we must rewind
+         * or close/re-open the file so that the next attempt starts
+         * over from the beginning.
+         *
+         * For the upload case, we might need to start over reading from a
+         * previous point if we have uploaded something when this was
+         * returned.
+         */
+        break;
+      }
+    }
+  } /* if CURLE_OK */
+  else if(result) {
+    const char *scheme;
+
+    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
+    curl_easy_getinfo(curl, CURLINFO_SCHEME, &scheme);
+    scheme = proto_token(scheme);
+
+    if((scheme == proto_ftp || scheme == proto_ftps) && response / 100 == 4)
+      /*
+       * This is typically when the FTP server only allows a certain
+       * amount of users and we are not one of them. All 4xx codes
+       * are transient.
+       */
+      retry = RETRY_FTP;
+  }
+
+  if(result && !retry && config->retry_all_errors)
+    retry = RETRY_ALL_ERRORS;
+
+  if(retry) {
+    long sleeptime = 0;
+    curl_off_t retry_after = 0;
+    static const char * const m[]={
+      NULL,
+      "(retrying all errors)",
+      ": timeout",
+      ": connection refused",
+      ": HTTP error",
+      ": FTP error"
+    };
+
+    sleeptime = per->retry_sleep;
+    if(RETRY_HTTP == retry) {
+      curl_easy_getinfo(curl, CURLINFO_RETRY_AFTER, &retry_after);
+      if(retry_after) {
+        /* store in a 'long', make sure it does not overflow */
+        if(retry_after > LONG_MAX/1000)
+          sleeptime = LONG_MAX;
+        else if((retry_after * 1000) > sleeptime)
+          sleeptime = (long)retry_after * 1000; /* milliseconds */
+
+        /* if adding retry_after seconds to the process would exceed the
+           maximum time allowed for retrying, then exit the retries right
+           away */
+        if(config->retry_maxtime) {
+          curl_off_t seconds = curlx_timediff(curlx_now(),
+                                              per->retrystart)/1000;
+
+          if((CURL_OFF_T_MAX - retry_after < seconds) ||
+             (seconds + retry_after > config->retry_maxtime)) {
+            warnf(config->global, "The Retry-After: time would "
+                  "make this command line exceed the maximum allowed time "
+                  "for retries.");
+            *retryp = FALSE;
+            return CURLE_OK; /* no retry */
+          }
+        }
+      }
+    }
+    warnf(config->global, "Problem %s. "
+          "Will retry in %ld second%s. "
+          "%ld retr%s left.",
+          m[retry], sleeptime/1000L,
+          (sleeptime/1000L == 1 ? "" : "s"),
+          per->retry_remaining,
+          (per->retry_remaining > 1 ? "ies" : "y"));
+
+    per->retry_remaining--;
+    if(!config->retry_delay) {
+      per->retry_sleep *= 2;
+      if(per->retry_sleep > RETRY_SLEEP_MAX)
+        per->retry_sleep = RETRY_SLEEP_MAX;
+    }
+
+    if(outs->bytes && outs->filename && outs->stream) {
+#ifndef __MINGW32CE__
+      struct_stat fileinfo;
+
+      /* The output can be a named pipe or a character device etc that
+         cannot be truncated. Only truncate regular files. */
+      if(!fstat(fileno(outs->stream), &fileinfo) &&
+         S_ISREG(fileinfo.st_mode))
+#else
+      /* Windows CE's fileno() is bad so just skip the check */
+#endif
+      {
+        int rc;
+        /* We have written data to an output file, we truncate file */
+        fflush(outs->stream);
+        notef(config->global,
+              "Throwing away %" CURL_FORMAT_CURL_OFF_T " bytes",
+              outs->bytes);
+        /* truncate file at the position where we started appending */
+#if defined(HAVE_FTRUNCATE) && !defined(__DJGPP__) && !defined(__AMIGA__) && \
+  !defined(__MINGW32CE__)
+        if(ftruncate(fileno(outs->stream), outs->init)) {
+          /* when truncate fails, we cannot just append as then we will
+             create something strange, bail out */
+          errorf(config->global, "Failed to truncate file");
+          return CURLE_WRITE_ERROR;
+        }
+        /* now seek to the end of the file, the position where we
+           just truncated the file in a large file-safe way */
+        rc = fseek(outs->stream, 0, SEEK_END);
+#else
+        /* ftruncate is not available, so just reposition the file
+           to the location we would have truncated it. This will not
+           work properly with large files on 32-bit systems, but
+           most of those will have ftruncate. */
+        rc = fseek(outs->stream, (long)outs->init, SEEK_SET);
+#endif
+        if(rc) {
+          errorf(config->global, "Failed seeking to end of file");
+          return CURLE_WRITE_ERROR;
+        }
+        outs->bytes = 0; /* clear for next round */
+      }
+    }
+    *retryp = TRUE;
+    per->num_retries++;
+    *delayms = sleeptime;
+  }
+  return CURLE_OK;
+}
+
+
 /*
  * Call this after a transfer has completed.
  */
@@ -457,187 +649,10 @@ static CURLcode post_per_transfer(struct GlobalConfig *global,
      (!config->retry_maxtime ||
       (curlx_timediff(curlx_now(), per->retrystart) <
        config->retry_maxtime*1000L)) ) {
-    enum {
-      RETRY_NO,
-      RETRY_ALL_ERRORS,
-      RETRY_TIMEOUT,
-      RETRY_CONNREFUSED,
-      RETRY_HTTP,
-      RETRY_FTP,
-      RETRY_LAST /* not used */
-    } retry = RETRY_NO;
-    long response = 0;
-    if((CURLE_OPERATION_TIMEDOUT == result) ||
-       (CURLE_COULDNT_RESOLVE_HOST == result) ||
-       (CURLE_COULDNT_RESOLVE_PROXY == result) ||
-       (CURLE_FTP_ACCEPT_TIMEOUT == result))
-      /* retry timeout always */
-      retry = RETRY_TIMEOUT;
-    else if(config->retry_connrefused &&
-            (CURLE_COULDNT_CONNECT == result)) {
-      long oserrno = 0;
-      curl_easy_getinfo(curl, CURLINFO_OS_ERRNO, &oserrno);
-      if(SOCKECONNREFUSED == oserrno)
-        retry = RETRY_CONNREFUSED;
-    }
-    else if((CURLE_OK == result) ||
-            ((config->failonerror || config->failwithbody) &&
-             (CURLE_HTTP_RETURNED_ERROR == result))) {
-      /* If it returned OK. _or_ failonerror was enabled and it
-         returned due to such an error, check for HTTP transient
-         errors to retry on. */
-      const char *scheme;
-      curl_easy_getinfo(curl, CURLINFO_SCHEME, &scheme);
-      scheme = proto_token(scheme);
-      if(scheme == proto_http || scheme == proto_https) {
-        /* This was HTTP(S) */
-        curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
-
-        switch(response) {
-        case 408: /* Request Timeout */
-        case 429: /* Too Many Requests (RFC6585) */
-        case 500: /* Internal Server Error */
-        case 502: /* Bad Gateway */
-        case 503: /* Service Unavailable */
-        case 504: /* Gateway Timeout */
-          retry = RETRY_HTTP;
-          /*
-           * At this point, we have already written data to the output
-           * file (or terminal). If we write to a file, we must rewind
-           * or close/re-open the file so that the next attempt starts
-           * over from the beginning.
-           *
-           * For the upload case, we might need to start over reading from a
-           * previous point if we have uploaded something when this was
-           * returned.
-           */
-          break;
-        }
-      }
-    } /* if CURLE_OK */
-    else if(result) {
-      const char *scheme;
-
-      curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
-      curl_easy_getinfo(curl, CURLINFO_SCHEME, &scheme);
-      scheme = proto_token(scheme);
-
-      if((scheme == proto_ftp || scheme == proto_ftps) && response / 100 == 4)
-        /*
-         * This is typically when the FTP server only allows a certain
-         * amount of users and we are not one of them. All 4xx codes
-         * are transient.
-         */
-        retry = RETRY_FTP;
-    }
-
-    if(result && !retry && config->retry_all_errors)
-      retry = RETRY_ALL_ERRORS;
-
-    if(retry) {
-      long sleeptime = 0;
-      curl_off_t retry_after = 0;
-      static const char * const m[]={
-        NULL,
-        "(retrying all errors)",
-        ": timeout",
-        ": connection refused",
-        ": HTTP error",
-        ": FTP error"
-      };
-
-      sleeptime = per->retry_sleep;
-      if(RETRY_HTTP == retry) {
-        curl_easy_getinfo(curl, CURLINFO_RETRY_AFTER, &retry_after);
-        if(retry_after) {
-          /* store in a 'long', make sure it does not overflow */
-          if(retry_after > LONG_MAX/1000)
-            sleeptime = LONG_MAX;
-          else if((retry_after * 1000) > sleeptime)
-            sleeptime = (long)retry_after * 1000; /* milliseconds */
-
-          /* if adding retry_after seconds to the process would exceed the
-             maximum time allowed for retrying, then exit the retries right
-             away */
-          if(config->retry_maxtime) {
-            curl_off_t seconds = curlx_timediff(curlx_now(),
-                                                per->retrystart)/1000;
-
-            if((CURL_OFF_T_MAX - retry_after < seconds) ||
-               (seconds + retry_after > config->retry_maxtime)) {
-              warnf(config->global, "The Retry-After: time would "
-                    "make this command line exceed the maximum allowed time "
-                    "for retries.");
-              goto noretry;
-            }
-          }
-        }
-      }
-      warnf(config->global, "Problem %s. "
-            "Will retry in %ld second%s. "
-            "%ld retr%s left.",
-            m[retry], sleeptime/1000L,
-            (sleeptime/1000L == 1 ? "" : "s"),
-            per->retry_remaining,
-            (per->retry_remaining > 1 ? "ies" : "y"));
-
-      per->retry_remaining--;
-      if(!config->retry_delay) {
-        per->retry_sleep *= 2;
-        if(per->retry_sleep > RETRY_SLEEP_MAX)
-          per->retry_sleep = RETRY_SLEEP_MAX;
-      }
-
-      if(outs->bytes && outs->filename && outs->stream) {
-#ifndef __MINGW32CE__
-        struct_stat fileinfo;
-
-        /* The output can be a named pipe or a character device etc that
-           cannot be truncated. Only truncate regular files. */
-        if(!fstat(fileno(outs->stream), &fileinfo) &&
-           S_ISREG(fileinfo.st_mode))
-#else
-        /* Windows CE's fileno() is bad so just skip the check */
-#endif
-        {
-          /* We have written data to an output file, we truncate file */
-          fflush(outs->stream);
-          notef(config->global,
-                "Throwing away %" CURL_FORMAT_CURL_OFF_T " bytes",
-                outs->bytes);
-          /* truncate file at the position where we started appending */
-#if defined(HAVE_FTRUNCATE) && !defined(__DJGPP__) && !defined(__AMIGA__) && \
-  !defined(__MINGW32CE__)
-          if(ftruncate(fileno(outs->stream), outs->init)) {
-            /* when truncate fails, we cannot just append as then we will
-               create something strange, bail out */
-            errorf(config->global, "Failed to truncate file");
-            return CURLE_WRITE_ERROR;
-          }
-          /* now seek to the end of the file, the position where we
-             just truncated the file in a large file-safe way */
-          rc = fseek(outs->stream, 0, SEEK_END);
-#else
-          /* ftruncate is not available, so just reposition the file
-             to the location we would have truncated it. This will not
-             work properly with large files on 32-bit systems, but
-             most of those will have ftruncate. */
-          rc = fseek(outs->stream, (long)outs->init, SEEK_SET);
-#endif
-          if(rc) {
-            errorf(config->global, "Failed seeking to end of file");
-            return CURLE_WRITE_ERROR;
-          }
-          outs->bytes = 0; /* clear for next round */
-        }
-      }
-      *retryp = TRUE;
-      per->num_retries++;
-      *delay = sleeptime;
-      return CURLE_OK;
-    }
-  } /* if retry_remaining */
-noretry:
+    result = retrycheck(config, per, result, retryp, delay);
+    if(!result && *retryp)
+      return CURLE_OK; /* retry! */
+  }
 
   if((global->progressmode == CURL_PROGRESS_BAR) &&
      per->progressbar.calls)