diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 1694ac87fcd..050282a0c1a 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -1170,6 +1170,19 @@ before_response(Req0, Code0, Headers0, Args0) ->
 
 respond_(#httpd{mochi_req = MochiReq}, Code, Headers, _Args, start_response) ->
     MochiReq:start_response({Code, Headers});
+respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
+    % Special handling for the 413 response. Make sure the socket is closed,
+    % as we don't know how much data was read before the error was thrown.
+    % Also drain all the data in the receive buffer to avoid the connection
+    % being reset before the 413 response is parsed by the client. This is
+    % still racy; it just increases the chances of the 413 being detected
+    % correctly by the client (rather than getting a brutal TCP reset).
+    erlang:put(mochiweb_request_force_close, true),
+    Socket = MochiReq:get(socket),
+    mochiweb_socket:recv(Socket, 0, 0),
+    Result = MochiReq:Type({413, Headers, Args}),
+    mochiweb_socket:recv(Socket, 0, 0),
+    Result;
 respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
     MochiReq:Type({Code, Headers, Args}).
 
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
index 6e787514bdb..2f865c6d2a5 100644
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc.erl
@@ -28,7 +28,7 @@
 -define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
 -define(MAX_WAIT, 5 * 60 * 1000).
 -define(STREAM_STATUS, ibrowse_stream_status).
-
+-define(STOP_HTTP_WORKER, stop_http_worker).
 
 % This limit is for the number of messages we're willing to discard
 % from an HTTP stream in clean_mailbox/1 before killing the worker
@@ -78,10 +78,14 @@ send_req(HttpDb, Params1, Callback) ->
         throw:{retry, NewHttpDb0, NewParams0} ->
             {retry, NewHttpDb0, NewParams0}
     after
-        ok = couch_replicator_httpc_pool:release_worker(
-            HttpDb#httpdb.httpc_pool,
-            Worker
-        ),
+        Pool = HttpDb#httpdb.httpc_pool,
+        case get(?STOP_HTTP_WORKER) of
+            stop ->
+                ok = stop_and_release_worker(Pool, Worker),
+                erase(?STOP_HTTP_WORKER);
+            undefined ->
+                ok = couch_replicator_httpc_pool:release_worker(Pool, Worker)
+        end,
         clean_mailbox(Response)
     end,
     % This is necessary to keep this tail-recursive. Calling
@@ -138,7 +142,7 @@ stop_and_release_worker(Pool, Worker) ->
     ok = couch_replicator_httpc_pool:release_worker_sync(Pool, Worker).
 
 process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
-    stop_and_release_worker(HttpDb#httpdb.httpc_pool, Worker),
+    put(?STOP_HTTP_WORKER, stop),
     maybe_retry(sel_conn_closed, Worker, HttpDb, Params);
 
@@ -147,7 +151,7 @@ process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
 %% and closes the socket, ibrowse will detect that error when it sends
 %% next request.
 process_response({error, connection_closing}, Worker, HttpDb, Params, _Cb) ->
-    stop_and_release_worker(HttpDb#httpdb.httpc_pool, Worker),
+    put(?STOP_HTTP_WORKER, stop),
     maybe_retry({error, connection_closing}, Worker, HttpDb, Params);
 
 process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) ->
@@ -167,6 +171,7 @@ process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
             ?JSON_DECODE(Json)
         end,
         process_auth_response(HttpDb, Ok, Headers, Params),
+        if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
         Callback(Ok, Headers, EJson);
     R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
         backoff_success(HttpDb, Params),
@@ -194,6 +199,7 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
             stream_data_self(HttpDb1, Params, Worker, ReqId, Callback)
     end,
     put(?STREAM_STATUS, {streaming, Worker}),
+    if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
     ibrowse:stream_next(ReqId),
     try
         Ret = Callback(Ok, Headers, StreamDataFun),
diff --git a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
index 6f3308c3919..af3a285f5c0 100644
--- a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
+++ b/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
@@ -61,8 +61,8 @@ reduce_max_request_size_test_() ->
             % attachment which exceed maximum request size are simply
             % closed instead of returning a 413 request. That makes these
             % tests flaky.
-            % ++ [{Pair, fun should_replicate_one_with_attachment/2}
-            %    || Pair <- Pairs]
+            ++ [{Pair, fun should_replicate_one_with_attachment/2}
+               || Pair <- Pairs]
         }
     }.
 
@@ -90,12 +90,12 @@ should_replicate_one({From, To}, {_Ctx, {Source, Target}}) ->
 % POST-ing individual documents directly and skip bulk_docs. Test that case
 % separately
 % See note in main test function why this was disabled.
-% should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
-%     {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
-%      {inorder, [should_populate_source_one_large_attachment(Source),
-%                 should_populate_source(Source),
-%                 should_replicate(Source, Target),
-%                 should_compare_databases(Source, Target, [<<"doc0">>])]}}.
+should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
+    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+     {inorder, [should_populate_source_one_large_attachment(Source),
+                should_populate_source(Source),
+                should_replicate(Source, Target),
+                should_compare_databases(Source, Target, [<<"doc0">>])]}}.
 
 
 should_populate_source({remote, Source}) ->
@@ -112,11 +112,11 @@ should_populate_source_one_large_one_small(Source) ->
     {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_one_small(Source, 12000, 3000))}.
 
 
-% should_populate_source_one_large_attachment({remote, Source}) ->
-%     should_populate_source_one_large_attachment(Source);
+should_populate_source_one_large_attachment({remote, Source}) ->
+    should_populate_source_one_large_attachment(Source);
 
-% should_populate_source_one_large_attachment(Source) ->
-%     {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
+should_populate_source_one_large_attachment(Source) ->
+    {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
 
 
 should_replicate({remote, Source}, Target) ->
@@ -156,8 +156,8 @@ one_large_one_small(DbName, Large, Small) ->
     add_doc(DbName, <<"doc1">>, Small, 0).
 
-% one_large_attachment(DbName, Size, AttSize) ->
-%     add_doc(DbName, <<"doc0">>, Size, AttSize).
+one_large_attachment(DbName, Size, AttSize) ->
+    add_doc(DbName, <<"doc0">>, Size, AttSize).
 
 
 add_doc(DbName, DocId, Size, AttSize) when is_binary(DocId) ->
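
Note for reviewers: the idea behind the couch_httpd change, shown in isolation as a minimal, hypothetical sketch over a bare gen_tcp socket in passive mode. This is not code from the patch; send_413/1 and drain/1 are made-up names, and the patch itself performs a single zero-timeout mochiweb_socket:recv/3 before and after writing the response rather than a drain loop.

%% Hypothetical sketch only (not part of the patch): drain whatever the
%% client has already sent, answer 413, drain again, then close. A zero
%% recv timeout means we never block; we only empty what is already
%% buffered, so an unread request body is less likely to make the kernel
%% reset the connection before the client reads the 413.
send_413(Socket) ->
    drain(Socket),
    ok = gen_tcp:send(Socket, <<"HTTP/1.1 413 Request Entity Too Large\r\n"
                                "Connection: close\r\n\r\n">>),
    drain(Socket),
    gen_tcp:close(Socket).

drain(Socket) ->
    case gen_tcp:recv(Socket, 0, 0) of
        {ok, _Data} -> drain(Socket);
        {error, _Reason} -> ok
    end.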
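On the replicator side, the immediate stop_and_release_worker/2 calls are replaced by a process-dictionary flag that send_req/3 consumes in its after clause, so the stop-versus-release decision is made exactly once per request. Below is a hypothetical, self-contained sketch of that pattern; pool, checkout/1, release/2 and stop_and_release/2 are stand-in names, not the couch_replicator_httpc_pool API.

%% Hypothetical sketch only: response handlers mark the worker as poisoned;
%% the checkout wrapper decides once, in its after clause, whether to stop
%% the worker or return it to the pool.
with_worker(Pool, Fun) ->
    {ok, Worker} = pool:checkout(Pool),
    try
        Fun(Worker)
    after
        %% erase/1 returns the previous value, so the flag is read and
        %% cleared in one step and cannot leak into the next request.
        case erase(stop_worker_flag) of
            stop -> ok = pool:stop_and_release(Pool, Worker);
            undefined -> ok = pool:release(Pool, Worker)
        end
    end.

%% Called from deep inside response handling, e.g. on a 413 or when ibrowse
%% reports the connection closing, where stopping the worker on the spot
%% would race with the cleanup in the caller's after clause.
mark_worker_stop() ->
    put(stop_worker_flag, stop).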