1
0
mirror of https://github.com/moparisthebest/curl synced 2024-12-22 16:18:48 -05:00

better detection for when add_buffer() returns failure, and return when that happens
This commit is contained in:
Daniel Stenberg 2004-05-10 10:49:35 +00:00
parent 887d78a9ad
commit 71fdc063bd

View File

@@ -1886,19 +1886,29 @@ CURLcode Curl_http(struct connectdata *conn)
          This limit is no magic limit but only set to prevent really huge
          POSTs to get the data duplicated with malloc() and family. */
-      add_buffer(req_buffer, "\r\n", 2); /* end of headers! */
+      result == add_buffer(req_buffer, "\r\n", 2); /* end of headers! */
+      if(result)
+        return result;
 
-      if(!conn->bits.upload_chunky)
+      if(!conn->bits.upload_chunky) {
         /* We're not sending it 'chunked', append it to the request
            already now to reduce the number if send() calls */
-        add_buffer(req_buffer, data->set.postfields, (size_t)postsize);
+        result = add_buffer(req_buffer, data->set.postfields,
+                            (size_t)postsize);
+      }
       else {
         /* Append the POST data chunky-style */
-        add_bufferf(req_buffer, "%x\r\n", (int)postsize);
-        add_buffer(req_buffer, data->set.postfields, (size_t)postsize);
-        add_buffer(req_buffer, "\r\n0\r\n\r\n", 7); /* end of a chunked
-                                                       transfer stream */
+        result = add_bufferf(req_buffer, "%x\r\n", (int)postsize);
+        if(CURLE_OK == result)
+          result = add_buffer(req_buffer, data->set.postfields,
+                              (size_t)postsize);
+        if(CURLE_OK == result)
+          result = add_buffer(req_buffer,
+                              "\r\n0\r\n\r\n", 7); /* end of a chunked
+                                                      transfer stream */
       }
+      if(result)
+        return result;
     }
     else {
       /* A huge POST coming up, do data separate from the request */

NOTE(review): the first added line uses `result ==` (a comparison that discards the
return value) where `result =` was almost certainly intended; this typo appears to be
in the historical commit as published and was corrected in a later curl commit — it is
reproduced here faithfully, not introduced by this reconstruction.