From 65cb34d317688bd49cb9ee8ea53e294d8bc3a598 Mon Sep 17 00:00:00 2001
From: kwsc98 <45926476+kwsc98@users.noreply.github.com>
Date: Tue, 27 Feb 2024 11:46:58 +0800
Subject: [PATCH] Interface support (#2)

* 20240226

* 20240226

* support interface

---------

Co-authored-by: kwsc98
Co-authored-by: kwsc98
---
 .github/workflows/github-actions.yml | 5 +-
 .licenserc.yaml | 1 -
 Cargo.toml | 6 +-
 NOTICE | 2 +-
 application.yaml | 7 +-
 common/base/src/url.rs | 2 +
 common/macro/Cargo.toml | 21 +
 common/macro/LICENSE | 202 +++++++
 common/macro/src/lib.rs | 300 +++++++++++
 config/src/config.rs | 6 +-
 config/src/lib.rs | 1 +
 config/src/router.rs | 71 +++
 config/src/service.rs | 1 +
 dubbo-build/Cargo.toml | 4 +-
 dubbo-build/src/client.rs | 26 +-
 dubbo-build/src/prost.rs | 2 -
 dubbo-build/src/server.rs | 32 +-
 dubbo/Cargo.toml | 15 +-
 dubbo/src/cluster/directory.rs | 124 -----
 dubbo/src/cluster/failover.rs | 71 +++
 dubbo/src/cluster/loadbalance/impls/random.rs | 59 --
 .../cluster/loadbalance/impls/roundrobin.rs | 85 ---
 dubbo/src/cluster/loadbalance/types.rs | 42 --
 dubbo/src/cluster/mod.rs | 180 ++-----
 .../router/condition/condition_router.rs | 59 ++
 dubbo/src/cluster/router/condition/matcher.rs | 78 +++
 dubbo/src/cluster/router/condition/mod.rs | 3 +
 .../cluster/router/condition/single_router.rs | 215 ++++++++
 .../router/manager/condition_manager.rs | 72 +++
 dubbo/src/cluster/router/manager/mod.rs | 3 +
 .../cluster/router/manager/router_manager.rs | 152 ++++++
 .../src/cluster/router/manager/tag_manager.rs | 20 +
 dubbo/src/cluster/router/mod.rs | 25 +
 .../cluster/router/nacos_config_center/mod.rs | 1 +
 .../nacos_config_center/nacos_client.rs | 126 +++++
 dubbo/src/cluster/router/router_chain.rs | 56 ++
 dubbo/src/cluster/router/tag/mod.rs | 1 +
 dubbo/src/cluster/router/tag/tag_router.rs | 86 +++
 dubbo/src/cluster/router/utils.rs | 16 +
 dubbo/src/codegen.rs | 7 +-
 dubbo/src/directory/mod.rs | 264 +++++++++
 dubbo/src/framework.rs | 35 +-
 dubbo/src/invocation.rs | 3 +-
 dubbo/src/invoker/clone_body.rs | 350 ++++++++++++
 dubbo/src/invoker/clone_invoker.rs | 256 +++++++++
 dubbo/src/invoker/mod.rs | 19 +
 dubbo/src/lib.rs | 6 +
 dubbo/src/loadbalancer/mod.rs | 95 ++++
 dubbo/src/param.rs | 9 +
 dubbo/src/protocol/mod.rs | 28 +-
 dubbo/src/protocol/triple/triple_invoker.rs | 118 +++-
 dubbo/src/registry/mod.rs | 80 +--
 dubbo/src/registry/n_registry.rs | 203 +++++++
 dubbo/src/registry/protocol.rs | 31 +-
 dubbo/src/registry/types.rs | 47 +-
 dubbo/src/route/mod.rs | 150 ++++++
 dubbo/src/status.rs | 4 +
 dubbo/src/svc.rs | 76 +++
 dubbo/src/triple/client/builder.rs | 80 ++-
 dubbo/src/triple/client/triple.rs | 247 ++++-----
 dubbo/src/triple/decode.rs | 64 +--
 dubbo/src/triple/encode.rs | 59 +-
 dubbo/src/triple/mod.rs | 1 +
 dubbo/src/triple/server/mod.rs | 1 +
 dubbo/src/triple/server/support.rs | 155 ++++++
 dubbo/src/triple/server/triple.rs | 127 ++---
 dubbo/src/triple/transport/connection.rs | 79 +--
 .../transport/connector/https_connector.rs | 130 -----
 dubbo/src/triple/transport/connector/mod.rs | 7 +-
 dubbo/src/triple/triple_wrapper.rs | 80 +++
 dubbo/src/utils/tls.rs | 25 +-
 examples/echo/Cargo.toml | 15 +-
 examples/echo/README.md | 18 -
 examples/echo/README_CN.md | 18 -
 examples/echo/fixtures/ca.crt | 21 -
 examples/echo/fixtures/server.crt | 23 -
 examples/echo/fixtures/server.key | 28 -
 examples/echo/src/echo-tls/client.rs | 121 -----
 examples/echo/src/echo-tls/server.rs | 208 -------
 examples/echo/src/echo/client.rs | 2 +-
 .../echo/src/generated/grpc.examples.echo.rs | 52 +-
 examples/greeter/Cargo.toml | 7 +-
 examples/greeter/src/greeter/client.rs | 23 +-
 examples/greeter/src/greeter/server.rs | 13 +-
 examples/interface/Cargo.toml | 41 ++
 examples/interface/LICENSE | 202 +++++++
 examples/interface/README.md | 22 +
 examples/interface/README_CN.md | 21 +
 examples/interface/application.yaml | 25 +
 .../interface/src/client.rs | 46 +-
 .../mod.rs => examples/interface/src/lib.rs | 22 +-
 examples/interface/src/server.rs | 63 +++
 registry/nacos/Cargo.toml | 4 +-
 registry/nacos/src/lib.rs | 509 ++++++++++--------
 registry/zookeeper/Cargo.toml | 5 +-
 registry/zookeeper/src/lib.rs | 429 +++++++--------
 96 files changed, 4764 insertions(+), 2188 deletions(-)
 create mode 100644 common/macro/Cargo.toml
 create mode 100644 common/macro/LICENSE
 create mode 100644 common/macro/src/lib.rs
 create mode 100644 config/src/router.rs
 delete mode 100644 dubbo/src/cluster/directory.rs
 create mode 100644 dubbo/src/cluster/failover.rs
 delete mode 100644 dubbo/src/cluster/loadbalance/impls/random.rs
 delete mode 100644 dubbo/src/cluster/loadbalance/impls/roundrobin.rs
 delete mode 100644 dubbo/src/cluster/loadbalance/types.rs
 create mode 100644 dubbo/src/cluster/router/condition/condition_router.rs
 create mode 100644 dubbo/src/cluster/router/condition/matcher.rs
 create mode 100644 dubbo/src/cluster/router/condition/mod.rs
 create mode 100644 dubbo/src/cluster/router/condition/single_router.rs
 create mode 100644 dubbo/src/cluster/router/manager/condition_manager.rs
 create mode 100644 dubbo/src/cluster/router/manager/mod.rs
 create mode 100644 dubbo/src/cluster/router/manager/router_manager.rs
 create mode 100644 dubbo/src/cluster/router/manager/tag_manager.rs
 create mode 100644 dubbo/src/cluster/router/mod.rs
 create mode 100644 dubbo/src/cluster/router/nacos_config_center/mod.rs
 create mode 100644 dubbo/src/cluster/router/nacos_config_center/nacos_client.rs
 create mode 100644 dubbo/src/cluster/router/router_chain.rs
 create mode 100644 dubbo/src/cluster/router/tag/mod.rs
 create mode 100644 dubbo/src/cluster/router/tag/tag_router.rs
 create mode 100644 dubbo/src/cluster/router/utils.rs
 create mode 100644 dubbo/src/directory/mod.rs
 create mode 100644 dubbo/src/invoker/clone_body.rs
 create mode 100644 dubbo/src/invoker/clone_invoker.rs
 create mode 100644 dubbo/src/invoker/mod.rs
 create mode 100644 dubbo/src/loadbalancer/mod.rs
 create mode 100644 dubbo/src/param.rs
 create mode 100644 dubbo/src/registry/n_registry.rs
 create mode 100644 dubbo/src/route/mod.rs
 create mode 100644 dubbo/src/svc.rs
 create mode 100644 dubbo/src/triple/server/support.rs
 delete mode 100644 dubbo/src/triple/transport/connector/https_connector.rs
 create mode 100644 dubbo/src/triple/triple_wrapper.rs
 delete mode 100644 examples/echo/fixtures/ca.crt
 delete mode 100644 examples/echo/fixtures/server.crt
 delete mode 100644 examples/echo/fixtures/server.key
 delete mode 100644 examples/echo/src/echo-tls/client.rs
 delete mode 100644 examples/echo/src/echo-tls/server.rs
 create mode 100644 examples/interface/Cargo.toml
 create mode 100644 examples/interface/LICENSE
 create mode 100644 examples/interface/README.md
 create mode 100644 examples/interface/README_CN.md
 create mode 100644 examples/interface/application.yaml
 rename dubbo/src/cluster/loadbalance/mod.rs => examples/interface/src/client.rs (50%)
 rename dubbo/src/cluster/loadbalance/impls/mod.rs => examples/interface/src/lib.rs (63%)
 create mode 100644 examples/interface/src/server.rs

diff --git a/.github/workflows/github-actions.yml b/.github/workflows/github-actions.yml
index b02b3bc9..878a5e13 100644
---
a/.github/workflows/github-actions.yml +++ b/.github/workflows/github-actions.yml @@ -6,9 +6,10 @@ on: push: branches: ["*"] pull_request: - branches: ["*"] + branches: + - '*' + - 'refact/*' - workflow_dispatch: jobs: check: diff --git a/.licenserc.yaml b/.licenserc.yaml index 6bbef770..d5778c31 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -65,7 +65,6 @@ header: # `header` section is configurations for source codes license header. - '.github' - "**/*.yaml" - "**/generated/**" - - "**/fixtures/**" comment: on-failure # on what condition license-eye will comment on the pull request, `on-failure`, `always`, `never`. # license-location-threshold specifies the index threshold where the license header can be located, diff --git a/Cargo.toml b/Cargo.toml index ad39f084..a95a59bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,11 @@ [workspace] +resolver = "2" members = [ "common/logger", "common/utils", "common/extention", "common/base", + "common/macro", "registry/zookeeper", "registry/nacos", "metadata", @@ -11,6 +13,7 @@ members = [ "dubbo", "examples/echo", "examples/greeter", + "examples/interface", "dubbo-build", "remoting/net", "remoting/http", @@ -57,7 +60,6 @@ serde_yaml = "0.9.4" # yaml file parser once_cell = "1.16.0" itertools = "0.10.1" bytes = "1.0" -prost-serde = "0.3.0" -prost-serde-derive = "0.1.2" + diff --git a/NOTICE b/NOTICE index 2744a490..1288b4b6 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ Apache Dubbo -Copyright 2018-2024 The Apache Software Foundation +Copyright 2018-2023 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/application.yaml b/application.yaml index d357db14..bec29a67 100644 --- a/application.yaml +++ b/application.yaml @@ -21,4 +21,9 @@ dubbo: references: GreeterClientImpl: url: tri://localhost:20000 - protocol: tri \ No newline at end of file + protocol: tri + routers: + consumer: + - service: "org.apache.dubbo.sample.tri.Greeter" + url: tri://127.0.0.1:20000 + protocol: triple \ No newline at end of file diff --git a/common/base/src/url.rs b/common/base/src/url.rs index 82b026fd..075ce650 100644 --- a/common/base/src/url.rs +++ b/common/base/src/url.rs @@ -45,6 +45,8 @@ impl Url { pub fn from_url(url: &str) -> Option { // url: triple://127.0.0.1:8888/helloworld.Greeter + let binding = urlencoding::decode(url).unwrap(); + let url = binding.as_ref(); let uri = url .parse::() .map_err(|err| { diff --git a/common/macro/Cargo.toml b/common/macro/Cargo.toml new file mode 100644 index 00000000..d76736db --- /dev/null +++ b/common/macro/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "dubbo-macro" +version = "0.3.0" +edition = "2021" +license = "Apache-2.0" +description = "dubbo-macro" +documentation = "https://github.com/apache/dubbo-rust" +repository = "https://github.com/apache/dubbo-rust.git" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +proc-macro = true + +[dependencies] + +prost = "0.11.0" +prettyplease = {version = "0.1"} +proc-macro2 = "1.0" +quote = "1.0" +syn = { version = "2", features = ["full"] } +prost-build = "0.11.1" diff --git a/common/macro/LICENSE b/common/macro/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/common/macro/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/common/macro/src/lib.rs b/common/macro/src/lib.rs new file mode 100644 index 00000000..5a09a427 --- /dev/null +++ b/common/macro/src/lib.rs @@ -0,0 +1,300 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use proc_macro::TokenStream; +use quote::{quote, ToTokens}; +use std::collections::HashMap; +use syn::{ + self, parse_macro_input, FnArg, ImplItem, ItemImpl, ItemTrait, ReturnType, Token, TraitItem, +}; + +#[proc_macro_attribute] +pub fn rpc_trait(attr: TokenStream, item: TokenStream) -> TokenStream { + let (package, version) = parse_attr(attr); + let input = parse_macro_input!(item as ItemTrait); + let item_trait = get_item_trait(input.clone()); + let trait_ident = &input.ident; + let vis = &input.vis; + let items = &input.items; + let mut sig_item = vec![]; + for item in items { + if let TraitItem::Fn(item) = item { + sig_item.push(item.sig.clone()); + } + } + let mut fn_quote = vec![]; + for item in sig_item { + let asyncable = item.asyncness; + let ident = item.ident; + let inputs = item.inputs; + let req = inputs.iter().fold(vec![], |mut vec, e| { + if let FnArg::Typed(req) = e { + vec.push(req.pat.clone()); + } + vec + }); + let output = item.output; + let output_type = match &output { + ReturnType::Default => { + quote! {()} + } + ReturnType::Type(_, res_type) => res_type.to_token_stream(), + }; + let inputs = inputs.iter().fold(vec![], |mut vec, e| { + let mut token = e.to_token_stream(); + if vec.is_empty() { + if let FnArg::Receiver(_r) = e { + token = quote!(&mut self); + } + } + vec.push(token); + vec + }); + fn_quote.push( + quote! { + #[allow(non_snake_case)] + pub #asyncable fn #ident (#(#inputs),*) -> Result<#output_type,dubbo::status::Status> { + let mut req_vec : Vec = vec![]; + #( + let mut res_str = serde_json::to_string(&#req); + if let Err(err) = res_str { + return Err(dubbo::status::Status::new(dubbo::status::Code::InvalidArgument,err.to_string())); + } + req_vec.push(res_str.unwrap()); + )* + let _version : Option<&str> = #version; + let request = Request::new(TripleRequestWrapper::new(req_vec)); + let codec = ProstCodec::< + TripleRequestWrapper, + TripleResponseWrapper + >::default(); + let service_unique = #package.to_owned() + "." 
+ stringify!(#trait_ident); + let method_name = stringify!(#ident).to_string(); + let invocation = dubbo::invocation::RpcInvocation::default() + .with_service_unique_name(service_unique.clone()) + .with_method_name(method_name.clone()); + let path = "/".to_string() + &service_unique + "/" + &method_name; + let path = http::uri::PathAndQuery::from_str( + &path, + ).unwrap(); + let res = self.inner.unary(request, codec, path, invocation).await; + match res { + Ok(res) => { + let response_wrapper = res.into_parts().1; + let res: #output_type = serde_json::from_slice(&response_wrapper.data).unwrap(); + Ok(res) + }, + Err(err) => Err(err) + } + } + } + ); + } + let rpc_client = syn::Ident::new(&format!("{}Rpc", trait_ident), trait_ident.span()); + let expanded = quote! { + use dubbo::triple::client::TripleClient; + use dubbo::triple::triple_wrapper::TripleRequestWrapper; + use dubbo::triple::triple_wrapper::TripleResponseWrapper; + use dubbo::triple::codec::prost::ProstCodec; + use dubbo::invocation::Request; + use dubbo::invocation::Response; + use dubbo::triple::client::builder::ClientBuilder; + use std::str::FromStr; + + #item_trait + + #vis struct #rpc_client { + inner: TripleClient + } + impl #rpc_client { + #( + #fn_quote + )* + pub fn new(builder: ClientBuilder) -> #rpc_client { + #rpc_client {inner: TripleClient::new(builder),} + } + } + }; + TokenStream::from(expanded) +} + +#[proc_macro_attribute] +pub fn rpc_server(attr: TokenStream, item: TokenStream) -> TokenStream { + let (package, version) = parse_attr(attr); + let org_item = parse_macro_input!(item as ItemImpl); + let server_item = get_server_item(org_item.clone()); + let item = org_item.clone(); + let item_trait = &item.trait_.unwrap().1.segments[0].ident; + let item_self = item.self_ty; + let items_ident_fn = item.items.iter().fold(vec![], |mut vec, e| { + if let ImplItem::Fn(fn_item) = e { + vec.push(fn_item.sig.ident.clone()) + } + vec + }); + let items_fn = item.items.iter().fold(vec![], |mut vec, e| { + if let ImplItem::Fn(fn_item) = e { + let method = &fn_item.sig.ident; + let mut req_pat = vec![]; + let req = fn_item.sig.inputs.iter().fold(vec![], |mut vec, e| { + if let FnArg::Typed(input) = e { + let req = &input.pat; + let req_type = &input.ty; + let token = quote! { + let result : Result<#req_type,_> = serde_json::from_slice(param_req[idx].as_bytes()); + if let Err(err) = result { + param.res = Err(dubbo::status::Status::new(dubbo::status::Code::InvalidArgument,err.to_string())); + return param; + } + let #req : #req_type = result.unwrap(); + idx += 1; + }; + req_pat.push(req); + vec.push(token); + } + vec + }, + ); + vec.push(quote! { + if ¶m.method_name[..] == stringify!(#method) { + let param_req = ¶m.req; + let mut idx = 0; + #( + #req + )* + let res = self.#method( + #( + #req_pat, + )* + ).await; + param.res = match res { + Ok(res) => { + let res = serde_json::to_string(&res).unwrap(); + Ok(res) + }, + Err(info) => Err(info) + }; + return param; + } + } + ) + } + vec + }); + let expanded = quote! 
{ + #server_item + use dubbo::triple::server::support::RpcServer; + use dubbo::triple::server::support::RpcFuture; + use dubbo::triple::server::support::RpcMsg; + + impl RpcServer for #item_self { + fn invoke (&self, param : RpcMsg) -> RpcFuture { + let mut rpc = self.clone(); + Box::pin(async move {rpc.prv_invoke(param).await}) + } + fn get_info(&self) -> (&str , &str , Option<&str> , Vec) { + let mut methods = vec![]; + #( + methods.push(stringify!(#items_ident_fn).to_string()); + )* + (#package ,stringify!(#item_trait) , #version ,methods) + } + } + + impl #item_self { + async fn prv_invoke (&self, mut param : RpcMsg) -> RpcMsg { + #(#items_fn)* + param.res = Err( + dubbo::status::Status::new(dubbo::status::Code::NotFound,format!("not find method by {}",param.method_name)) + ); + return param; + } + } + }; + expanded.into() +} + +fn parse_attr(attr: TokenStream) -> (proc_macro2::TokenStream, proc_macro2::TokenStream) { + let mut map = HashMap::new(); + let attr = attr.clone().to_string(); + let args: Vec<&str> = attr.split(",").collect(); + for arg in args { + let arg = arg.replace(" ", ""); + let item: Vec<&str> = arg.split("=").collect(); + map.insert( + item[0].to_string().clone(), + item[1].replace("\"", "").to_string().clone(), + ); + } + let package = map.get("package").map_or("krpc", |e| e); + let package = quote!(#package); + let version = match map.get("version").map(|e| e.to_string()) { + None => quote!(None), + Some(version) => quote!(Some(&#version)), + }; + return (package, version); +} + +fn get_server_item(item: ItemImpl) -> proc_macro2::TokenStream { + let impl_item = item.impl_token; + let trait_ident = item.trait_.unwrap().1; + let ident = item.self_ty.to_token_stream(); + let fn_items = item.items.iter().fold(vec![], |mut vec, e| { + if let ImplItem::Fn(fn_item) = e { + vec.push(fn_item); + } + vec + }); + quote! { + #impl_item #trait_ident for #ident { + #( + #[allow(non_snake_case)] + #fn_items + )* + } + } +} + +fn get_item_trait(item: ItemTrait) -> proc_macro2::TokenStream { + let trait_ident = &item.ident; + let item_fn = item.items.iter().fold(vec![], |mut vec, e| { + if let TraitItem::Fn(item_fn) = e { + let asyncable = &item_fn.sig.asyncness; + let ident = &item_fn.sig.ident; + let inputs = &item_fn.sig.inputs; + let output_type = match &item_fn.sig.output { + ReturnType::Default => { + quote! {()} + } + ReturnType::Type(_, res_type) => res_type.to_token_stream(), + }; + vec.push(quote!( + #asyncable fn #ident (#inputs) -> Result<#output_type,dubbo::status::Status>; + )); + } + vec + }); + quote! 
{ + pub trait #trait_ident { + #( + #[allow(async_fn_in_trait)] + #[allow(non_snake_case)] + #item_fn + )* + } + } +} diff --git a/config/src/config.rs b/config/src/config.rs index 646873d1..cf4e4415 100644 --- a/config/src/config.rs +++ b/config/src/config.rs @@ -17,7 +17,7 @@ use std::{collections::HashMap, env, path::PathBuf}; -use crate::{protocol::Protocol, registry::RegistryConfig}; +use crate::{protocol::Protocol, registry::RegistryConfig, router::RouterConfig}; use dubbo_logger::tracing; use dubbo_utils::yaml_util::yaml_file_parser; use once_cell::sync::OnceCell; @@ -44,6 +44,9 @@ pub struct RootConfig { #[serde(default)] pub registries: HashMap, + #[serde(default)] + pub routers: RouterConfig, + #[serde(default)] pub data: HashMap, } @@ -63,6 +66,7 @@ impl RootConfig { protocols: HashMap::new(), registries: HashMap::new(), provider: ProviderConfig::new(), + routers: RouterConfig::default(), data: HashMap::new(), } } diff --git a/config/src/lib.rs b/config/src/lib.rs index 0748c667..6fa38801 100644 --- a/config/src/lib.rs +++ b/config/src/lib.rs @@ -21,4 +21,5 @@ pub mod config; pub mod protocol; pub mod provider; pub mod registry; +pub mod router; pub mod service; diff --git a/config/src/router.rs b/config/src/router.rs new file mode 100644 index 00000000..b45bd478 --- /dev/null +++ b/config/src/router.rs @@ -0,0 +1,71 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Default)] +pub struct ConditionRouterConfig { + #[serde(rename = "configVersion")] + pub config_version: String, + pub scope: String, + pub force: bool, + pub enabled: bool, + pub key: String, + pub conditions: Vec, +} + +#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq)] +pub struct TagRouterConfig { + #[serde(rename = "configVersion")] + pub config_version: String, + pub force: bool, + pub enabled: bool, + pub key: String, + pub tags: Vec, +} + +#[derive(Serialize, Deserialize, Clone, PartialEq, Default, Debug)] +pub struct ConsumerConfig { + pub service: String, + pub url: String, + pub protocol: String, +} + +#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq)] +pub struct Tag { + pub name: String, + #[serde(rename = "match")] + pub matches: Vec, +} + +#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq)] +pub struct TagMatchRule { + pub key: String, + pub value: String, +} + +impl ConditionRouterConfig { + pub fn new(config: &String) -> Self { + serde_yaml::from_str(config).expect("parse error") + } +} + +#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] +pub struct EnableAuth { + pub auth_username: String, + pub auth_password: String, +} + +#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] +pub struct NacosConfig { + pub addr: String, + pub namespace: String, + pub app: String, + pub enable_auth: Option, + pub enable_auth_plugin_http: Option, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Default)] +pub struct RouterConfig { + pub consumer: Option>, + pub nacos: Option, + pub conditions: Option>, + pub tags: Option, +} diff --git a/config/src/service.rs b/config/src/service.rs index 1f85a926..282faf61 100644 --- a/config/src/service.rs +++ b/config/src/service.rs @@ -22,6 +22,7 @@ pub struct ServiceConfig { pub version: String, pub group: String, pub protocol: String, + pub serialization: Option, pub interface: String, } diff --git a/dubbo-build/Cargo.toml b/dubbo-build/Cargo.toml index 1ca031da..3276a683 100644 --- a/dubbo-build/Cargo.toml +++ 
b/dubbo-build/Cargo.toml @@ -10,9 +10,9 @@ repository = "https://github.com/apache/dubbo-rust.git" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.11.9" +prost = "0.11.0" prettyplease = {version = "0.1"} proc-macro2 = "1.0" quote = "1.0" syn = "1.0" -prost-build = "0.11.9" +prost-build = "0.11.1" diff --git a/dubbo-build/src/client.rs b/dubbo-build/src/client.rs index af32b64b..b222f752 100644 --- a/dubbo-build/src/client.rs +++ b/dubbo-build/src/client.rs @@ -20,6 +20,8 @@ use crate::{Method, Service}; use proc_macro2::TokenStream; use quote::{format_ident, quote}; +pub const CODEC_PATH: &str = "dubbo::codegen::ProstCodec"; + /// Generate service for client. /// /// This takes some `Service` and will generate a `TokenStream` that contains @@ -63,7 +65,7 @@ pub fn generate( #service_doc #(#struct_attributes)* - #[derive(Debug, Clone, Default)] + #[derive(Clone)] pub struct #service_ident { inner: TripleClient, } @@ -76,12 +78,6 @@ pub fn generate( } } - // pub fn build(builder: ClientBuilder) -> Self { - // Self { - // inner: TripleClient::new(builder), - // } - // } - pub fn new(builder: ClientBuilder) -> Self { Self { inner: TripleClient::new(builder), @@ -165,6 +161,7 @@ fn generate_unary( compile_well_known_types: bool, path: String, ) -> TokenStream { + let codec_name = syn::parse_str::(CODEC_PATH).unwrap(); let ident = format_ident!("{}", method.name()); let (request, response) = method.request_response_name(proto_path, compile_well_known_types); let method_name = method.identifier(); @@ -174,12 +171,14 @@ fn generate_unary( &mut self, request: Request<#request>, ) -> Result, dubbo::status::Status> { + let codec = #codec_name::<#request, #response>::default(); let invocation = RpcInvocation::default() .with_service_unique_name(String::from(#service_unique_name)) .with_method_name(String::from(#method_name)); let path = http::uri::PathAndQuery::from_static(#path); self.inner.unary( request, + codec, path, invocation, ).await @@ -194,7 +193,9 @@ fn generate_server_streaming( compile_well_known_types: bool, path: String, ) -> TokenStream { + let codec_name = syn::parse_str::(CODEC_PATH).unwrap(); let ident = format_ident!("{}", method.name()); + let (request, response) = method.request_response_name(proto_path, compile_well_known_types); let method_name = method.identifier(); @@ -203,12 +204,15 @@ fn generate_server_streaming( &mut self, request: Request<#request>, ) -> Result>, dubbo::status::Status> { + + let codec = #codec_name::<#request, #response>::default(); let invocation = RpcInvocation::default() .with_service_unique_name(String::from(#service_unique_name)) .with_method_name(String::from(#method_name)); let path = http::uri::PathAndQuery::from_static(#path); self.inner.server_streaming( request, + codec, path, invocation, ).await @@ -223,7 +227,9 @@ fn generate_client_streaming( compile_well_known_types: bool, path: String, ) -> TokenStream { + let codec_name = syn::parse_str::(CODEC_PATH).unwrap(); let ident = format_ident!("{}", method.name()); + let (request, response) = method.request_response_name(proto_path, compile_well_known_types); let method_name = method.identifier(); @@ -232,12 +238,14 @@ fn generate_client_streaming( &mut self, request: impl IntoStreamingRequest ) -> Result, dubbo::status::Status> { + let codec = #codec_name::<#request, #response>::default(); let invocation = RpcInvocation::default() .with_service_unique_name(String::from(#service_unique_name)) 
.with_method_name(String::from(#method_name)); let path = http::uri::PathAndQuery::from_static(#path); self.inner.client_streaming( request, + codec, path, invocation, ).await @@ -252,7 +260,9 @@ fn generate_streaming( compile_well_known_types: bool, path: String, ) -> TokenStream { + let codec_name = syn::parse_str::(CODEC_PATH).unwrap(); let ident = format_ident!("{}", method.name()); + let (request, response) = method.request_response_name(proto_path, compile_well_known_types); let method_name = method.identifier(); @@ -261,12 +271,14 @@ fn generate_streaming( &mut self, request: impl IntoStreamingRequest ) -> Result>, dubbo::status::Status> { + let codec = #codec_name::<#request, #response>::default(); let invocation = RpcInvocation::default() .with_service_unique_name(String::from(#service_unique_name)) .with_method_name(String::from(#method_name)); let path = http::uri::PathAndQuery::from_static(#path); self.inner.bidi_streaming( request, + codec, path, invocation, ).await diff --git a/dubbo-build/src/prost.rs b/dubbo-build/src/prost.rs index 83864695..3918fd30 100644 --- a/dubbo-build/src/prost.rs +++ b/dubbo-build/src/prost.rs @@ -93,8 +93,6 @@ impl Builder { PathBuf::from(std::env::var("OUT_DIR").unwrap()) }; config.out_dir(out_dir); - config.type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize)]"); - config.message_attribute(".", "#[serde(default)]"); if self.compile_well_known_types { config.compile_well_known_types(); diff --git a/dubbo-build/src/server.rs b/dubbo-build/src/server.rs index 3ccb52ad..4dbbc9d2 100644 --- a/dubbo-build/src/server.rs +++ b/dubbo-build/src/server.rs @@ -21,6 +21,8 @@ use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{Ident, Lit, LitStr}; +pub const CODEC_PATH: &str = "dubbo::codegen::ProstCodec"; + /// Generate service for Server. /// /// This takes some `Service` and will generate a `TokenStream` that contains @@ -328,6 +330,8 @@ fn generate_unary( method_ident: Ident, server_trait: Ident, ) -> TokenStream { + let codec_name = syn::parse_str::(CODEC_PATH).unwrap(); + let service_ident = quote::format_ident!("{}Server", method.identifier()); let (request, response) = method.request_response_name(proto_path, compile_well_known_types); @@ -350,11 +354,16 @@ fn generate_unary( Box::pin(fut) } } + let fut = async move { - let mut server = TripleServer::<#request,#response>::new(); + let mut server = TripleServer::new( + #codec_name::<#response, #request>::default() + ); + let res = server.unary(#service_ident { inner }, req).await; Ok(res) }; + Box::pin(fut) } } @@ -366,6 +375,8 @@ fn generate_server_streaming( method_ident: Ident, server_trait: Ident, ) -> TokenStream { + let codec_name = syn::parse_str::(CODEC_PATH).unwrap(); + let service_ident = quote::format_ident!("{}Server", method.identifier()); let (request, response) = method.request_response_name(proto_path, compile_well_known_types); @@ -391,8 +402,12 @@ fn generate_server_streaming( Box::pin(fut) } } + let fut = async move { - let mut server = TripleServer::<#request,#response>::new(); + let mut server = TripleServer::new( + #codec_name::<#response, #request>::default() + ); + let res = server.server_streaming(#service_ident { inner }, req).await; Ok(res) }; @@ -411,6 +426,7 @@ fn generate_client_streaming( let service_ident = quote::format_ident!("{}Server", method.identifier()); let (request, response) = method.request_response_name(proto_path, compile_well_known_types); + let codec_name = syn::parse_str::(CODEC_PATH).unwrap(); quote! 
{ #[allow(non_camel_case_types)] @@ -434,7 +450,10 @@ fn generate_client_streaming( } let fut = async move { - let mut server = TripleServer::<#request,#response>::new(); + let mut server = TripleServer::new( + #codec_name::<#response, #request>::default() + ); + let res = server.client_streaming(#service_ident { inner }, req).await; Ok(res) }; @@ -450,6 +469,8 @@ fn generate_streaming( method_ident: Ident, server_trait: Ident, ) -> TokenStream { + let codec_name = syn::parse_str::(CODEC_PATH).unwrap(); + let service_ident = quote::format_ident!("{}Server", method.identifier()); let (request, response) = method.request_response_name(proto_path, compile_well_known_types); @@ -478,7 +499,10 @@ fn generate_streaming( } let fut = async move { - let mut server = TripleServer::<#request,#response>::new(); + let mut server = TripleServer::new( + #codec_name::<#response, #request>::default() + ); + let res = server.bidi_streaming(#service_ident { inner }, req).await; Ok(res) }; diff --git a/dubbo/Cargo.toml b/dubbo/Cargo.toml index 8c2821ec..84874748 100644 --- a/dubbo/Cargo.toml +++ b/dubbo/Cargo.toml @@ -14,16 +14,16 @@ hyper = { version = "0.14.26", features = ["full"] } http = "0.2" tower-service.workspace = true http-body = "0.4.4" -tower = { workspace = true, features = ["timeout"] } +tower = { workspace = true, features = ["timeout", "ready-cache","discover","retry"] } futures-util = "0.3.23" futures-core ="0.3.23" argh = "0.1" rustls-pemfile = "1.0.0" -rustls-webpki = "0.101.3" -rustls-native-certs = "0.6.3" -tokio-rustls="0.24.1" +tokio-rustls="0.23.4" tokio = { version = "1.0", features = [ "rt-multi-thread", "time", "fs", "macros", "net", "signal", "full" ] } -prost = "0.11.9" +tokio-util = "0.7.9" +tokio-stream = "0.1" +prost = "0.10.4" async-trait = "0.1.56" tower-layer.workspace = true bytes.workspace = true @@ -42,8 +42,13 @@ urlencoding.workspace = true lazy_static.workspace = true dubbo-base.workspace = true dubbo-logger.workspace = true +once_cell.workspace = true dubbo-config = { path = "../config", version = "0.3.0" } #对象存储 state = { version = "0.5", features = ["tls"] } +thiserror = "1.0.48" +regex = "1.9.1" +nacos-sdk = { version = "0.3.0", features = ["default"] } +serde_yaml = "0.9.22" diff --git a/dubbo/src/cluster/directory.rs b/dubbo/src/cluster/directory.rs deleted file mode 100644 index 144f0111..00000000 --- a/dubbo/src/cluster/directory.rs +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -use std::{ - collections::HashMap, - fmt::Debug, - str::FromStr, - sync::{Arc, RwLock}, -}; - -use crate::{ - codegen::TripleInvoker, - invocation::{Invocation, RpcInvocation}, - protocol::BoxInvoker, - registry::{memory_registry::MemoryNotifyListener, BoxRegistry}, -}; -use dubbo_base::Url; -use dubbo_logger::tracing; - -use crate::cluster::Directory; - -/// Directory. -/// -/// [Directory Service](http://en.wikipedia.org/wiki/Directory_service) - -#[derive(Debug, Clone)] -pub struct StaticDirectory { - uri: http::Uri, -} - -impl StaticDirectory { - pub fn new(host: &str) -> StaticDirectory { - let uri = match http::Uri::from_str(host) { - Ok(v) => v, - Err(err) => { - tracing::error!("http uri parse error: {}, host: {}", err, host); - panic!("http uri parse error: {}, host: {}", err, host) - } - }; - StaticDirectory { uri: uri } - } - - pub fn from_uri(uri: &http::Uri) -> StaticDirectory { - StaticDirectory { uri: uri.clone() } - } -} - -impl Directory for StaticDirectory { - fn list(&self, invocation: Arc) -> Vec { - let url = Url::from_url(&format!( - "{}://{}:{}/{}", - self.uri.scheme_str().unwrap_or("tri"), - self.uri.host().unwrap(), - self.uri.port().unwrap(), - invocation.get_target_service_unique_name(), - )) - .unwrap(); - let invoker = Box::new(TripleInvoker::new(url)); - vec![invoker] - } -} - -#[derive(Debug, Clone)] -pub struct RegistryDirectory { - registry: Arc, - service_instances: Arc>>>, -} - -impl RegistryDirectory { - pub fn new(registry: BoxRegistry) -> RegistryDirectory { - RegistryDirectory { - registry: Arc::new(registry), - service_instances: Arc::new(RwLock::new(HashMap::new())), - } - } -} - -impl Directory for RegistryDirectory { - fn list(&self, invocation: Arc) -> Vec { - let service_name = invocation.get_target_service_unique_name(); - - let url = Url::from_url(&format!( - "triple://{}:{}/{}", - "127.0.0.1", "8888", service_name - )) - .unwrap(); - - self.registry - .subscribe( - url, - Arc::new(MemoryNotifyListener { - service_instances: Arc::clone(&self.service_instances), - }), - ) - .expect("subscribe"); - - let map = self - .service_instances - .read() - .expect("service_instances.read"); - let binding = Vec::new(); - let url_vec = map.get(&service_name).unwrap_or(&binding); - // url_vec.to_vec() - let mut invokers: Vec = vec![]; - for item in url_vec.iter() { - invokers.push(Box::new(TripleInvoker::new(item.clone()))); - } - invokers - } -} diff --git a/dubbo/src/cluster/failover.rs b/dubbo/src/cluster/failover.rs new file mode 100644 index 00000000..8a00c9fb --- /dev/null +++ b/dubbo/src/cluster/failover.rs @@ -0,0 +1,71 @@ +use std::task::Poll; + +use futures_util::future; +use http::Request; +use tower::{retry::Retry, util::Oneshot, ServiceExt}; +use tower_service::Service; + +use crate::StdError; + +pub struct Failover { + inner: N, // loadbalancer service +} + +#[derive(Clone)] +pub struct FailoverPolicy; + +impl Failover { + pub fn new(inner: N) -> Self { + Self { inner } + } +} + +impl tower::retry::Policy, Res, E> for FailoverPolicy +where + B: http_body::Body + Clone, +{ + type Future = future::Ready; + + fn retry(&self, _req: &Request, result: Result<&Res, &E>) -> Option { + //TODO some error handling or logging + match result { + Ok(_) => None, + Err(_) => Some(future::ready(self.clone())), + } + } + + fn clone_request(&self, req: &Request) -> Option> { + let mut clone = http::Request::new(req.body().clone()); + *clone.method_mut() = req.method().clone(); + *clone.uri_mut() = req.uri().clone(); + *clone.headers_mut() = 
req.headers().clone(); + *clone.version_mut() = req.version(); + + Some(clone) + } +} + +impl Service> for Failover +where + // B is CloneBody + B: http_body::Body + Clone, + // loadbalancer service + N: Service> + Clone + 'static, + N::Error: Into, + N::Future: Send, +{ + type Response = N::Response; + + type Error = N::Error; + + type Future = Oneshot, Request>; + + fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let retry = Retry::new(FailoverPolicy, self.inner.clone()); + retry.oneshot(req) + } +} diff --git a/dubbo/src/cluster/loadbalance/impls/random.rs b/dubbo/src/cluster/loadbalance/impls/random.rs deleted file mode 100644 index 3e1cf651..00000000 --- a/dubbo/src/cluster/loadbalance/impls/random.rs +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -use dubbo_base::Url; -use std::{ - fmt::{Debug, Formatter}, - sync::Arc, -}; - -use crate::{ - cluster::loadbalance::types::{LoadBalance, Metadata}, - codegen::RpcInvocation, -}; - -pub struct RandomLoadBalance { - pub metadata: Metadata, -} - -impl Default for RandomLoadBalance { - fn default() -> Self { - RandomLoadBalance { - metadata: Metadata::new("random"), - } - } -} - -impl Debug for RandomLoadBalance { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "RandomLoadBalance") - } -} - -impl LoadBalance for RandomLoadBalance { - fn select( - &self, - invokers: Arc>, - _url: Option, - _invocation: Arc, - ) -> Option { - if invokers.is_empty() { - return None; - } - let index = rand::random::() % invokers.len(); - Some(invokers[index].clone()) - } -} diff --git a/dubbo/src/cluster/loadbalance/impls/roundrobin.rs b/dubbo/src/cluster/loadbalance/impls/roundrobin.rs deleted file mode 100644 index 5fd0ed49..00000000 --- a/dubbo/src/cluster/loadbalance/impls/roundrobin.rs +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -use dubbo_base::Url; -use std::{ - collections::HashMap, - fmt::{Debug, Formatter}, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, RwLock, - }, -}; - -use crate::{ - cluster::loadbalance::types::{LoadBalance, Metadata}, - codegen::RpcInvocation, -}; - -pub struct RoundRobinLoadBalance { - pub metadata: Metadata, - pub counter_map: RwLock>, -} - -impl Default for RoundRobinLoadBalance { - fn default() -> Self { - RoundRobinLoadBalance { - metadata: Metadata::new("roundrobin"), - counter_map: RwLock::new(HashMap::new()), - } - } -} - -impl Debug for RoundRobinLoadBalance { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "RoundRobinLoadBalance") - } -} - -impl RoundRobinLoadBalance { - fn guarantee_counter_key(&self, key: &str) { - let contained = self.counter_map.try_read().unwrap().contains_key(key); - if !contained { - self.counter_map - .try_write() - .unwrap() - .insert(key.to_string(), AtomicUsize::new(0)); - } - } -} - -impl LoadBalance for RoundRobinLoadBalance { - fn select( - &self, - invokers: Arc>, - _url: Option, - invocation: Arc, - ) -> Option { - if invokers.is_empty() { - return None; - } - let fingerprint = invocation.unique_fingerprint(); - self.guarantee_counter_key(fingerprint.as_str()); - let index = self - .counter_map - .try_read() - .unwrap() - .get(fingerprint.as_str())? - .fetch_add(1, Ordering::SeqCst) - % invokers.len(); - Some(invokers[index].clone()) - } -} diff --git a/dubbo/src/cluster/loadbalance/types.rs b/dubbo/src/cluster/loadbalance/types.rs deleted file mode 100644 index 9273d07e..00000000 --- a/dubbo/src/cluster/loadbalance/types.rs +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use dubbo_base::Url; -use std::{fmt::Debug, sync::Arc}; - -use crate::codegen::RpcInvocation; - -pub type BoxLoadBalance = Box; - -pub trait LoadBalance: Debug { - fn select( - &self, - invokers: Arc>, - url: Option, - invocation: Arc, - ) -> Option; -} - -pub struct Metadata { - pub name: &'static str, -} - -impl Metadata { - pub fn new(name: &'static str) -> Self { - Metadata { name } - } -} diff --git a/dubbo/src/cluster/mod.rs b/dubbo/src/cluster/mod.rs index d1f96f95..1a20c160 100644 --- a/dubbo/src/cluster/mod.rs +++ b/dubbo/src/cluster/mod.rs @@ -15,165 +15,71 @@ * limitations under the License. 
*/ -use std::{collections::HashMap, fmt::Debug, sync::Arc, task::Poll}; - -use aws_smithy_http::body::SdkBody; -use dubbo_base::Url; -use dyn_clone::DynClone; +use http::Request; +use tower_service::Service; use crate::{ - empty_body, - invocation::RpcInvocation, - protocol::{BoxInvoker, Invoker}, + codegen::RpcInvocation, invoker::clone_body::CloneBody, param::Param, svc::NewService, }; -pub mod directory; -pub mod loadbalance; - -pub trait Directory: Debug + DynClone { - fn list(&self, invocation: Arc) -> Vec; - // fn is_empty(&self) -> bool; -} +use self::failover::Failover; -dyn_clone::clone_trait_object!(Directory); +mod failover; -type BoxDirectory = Box; - -pub trait Cluster { - fn join(&self, dir: BoxDirectory) -> BoxInvoker; +pub struct NewCluster { + inner: N, // new loadbalancer service } -#[derive(Debug, Default)] -pub struct MockCluster {} - -impl Cluster for MockCluster { - fn join(&self, dir: BoxDirectory) -> BoxInvoker { - Box::new(FailoverCluster::new(dir)) - } -} -#[derive(Clone, Debug)] -pub struct FailoverCluster { - dir: Arc, +pub struct Cluster { + inner: S, // failover service } -impl FailoverCluster { - pub fn new(dir: BoxDirectory) -> FailoverCluster { - Self { dir: Arc::new(dir) } - } -} - -impl Invoker> for FailoverCluster { - type Response = http::Response; - - type Error = crate::Error; - - type Future = crate::BoxFuture; - - fn poll_ready( - &mut self, - _cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - // if self.dir.is_empty() return err - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: http::Request) -> Self::Future { - // let clone_body = req.body().try_clone().unwrap(); - // let mut clone_req = http::Request::builder() - // .uri(req.uri().clone()) - // .method(req.method().clone()); - // *clone_req.headers_mut().unwrap() = req.headers().clone(); - // let r = clone_req.body(clone_body).unwrap(); - let invokers = self.dir.list( - RpcInvocation::default() - .with_service_unique_name("hello".to_string()) - .into(), - ); - for mut invoker in invokers { - let fut = async move { - let res = invoker.call(req).await; - return res; - }; - return Box::pin(fut); - } - Box::pin(async move { - Ok(http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap()) +impl NewCluster { + pub fn layer() -> impl tower_layer::Layer { + tower_layer::layer_fn(|inner: N| { + NewCluster { + inner, // new loadbalancer service + } }) } - - fn get_url(&self) -> dubbo_base::Url { - Url::from_url("triple://127.0.0.1:8888/helloworld.Greeter").unwrap() - } } -#[derive(Debug, Default, Clone)] -pub struct MockDirectory { - // router_chain: RouterChain, - invokers: Vec, -} - -impl MockDirectory { - pub fn new(invokers: Vec) -> MockDirectory { - Self { - // router_chain: RouterChain::default(), - invokers, +impl NewService for NewCluster +where + T: Param, + // new loadbalancer service + S: NewService, +{ + type Service = Cluster>; + + fn new_service(&self, target: T) -> Self::Service { + Cluster { + inner: Failover::new(self.inner.new_service(target)), } } } -impl Directory for MockDirectory { - fn list(&self, _invo: Arc) -> Vec { - // tracing::info!("MockDirectory: {}", meta); - let _u = Url::from_url("triple://127.0.0.1:8888/helloworld.Greeter").unwrap(); - // vec![Box::new(TripleInvoker::new(u))] - // self.router_chain.route(u, invo); - self.invokers.clone() - } +impl Service> for Cluster +where + S: Service>, +{ + type Response = S::Response; - // fn is_empty(&self) -> bool { - // false 
- // } -} + type Error = S::Error; -#[derive(Debug, Default)] -pub struct RouterChain { - router: HashMap, - invokers: Vec, -} + type Future = S::Future; -impl RouterChain { - pub fn route(&self, url: Url, invo: Arc) -> Vec { - let r = self.router.get("mock").unwrap(); - r.route(self.invokers.clone(), url, invo) + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.inner.poll_ready(cx) } -} - -pub trait Router: Debug { - fn route( - &self, - invokers: Vec, - url: Url, - invo: Arc, - ) -> Vec; -} - -pub type BoxRouter = Box; - -#[derive(Debug, Default)] -pub struct MockRouter {} -impl Router for MockRouter { - fn route( - &self, - invokers: Vec, - _url: Url, - _invo: Arc, - ) -> Vec { - invokers + fn call(&mut self, req: Request) -> Self::Future { + let (parts, body) = req.into_parts(); + let clone_body = CloneBody::new(body); + let req = Request::from_parts(parts, clone_body); + self.inner.call(req) } } diff --git a/dubbo/src/cluster/router/condition/condition_router.rs b/dubbo/src/cluster/router/condition/condition_router.rs new file mode 100644 index 00000000..73aca005 --- /dev/null +++ b/dubbo/src/cluster/router/condition/condition_router.rs @@ -0,0 +1,59 @@ +use crate::{ + cluster::router::{condition::single_router::ConditionSingleRouter, Router}, + codegen::RpcInvocation, +}; +use dubbo_base::Url; +use std::{ + fmt::Debug, + sync::{Arc, RwLock}, +}; + +#[derive(Default, Debug, Clone)] +pub struct ConditionRouter { + //condition router for service scope + pub service_routers: Option>>, + //condition router for application scope + pub application_routers: Option>>, +} + +impl Router for ConditionRouter { + fn route(&self, mut invokers: Vec, url: Url, invo: Arc) -> Vec { + if let Some(routers) = &self.application_routers { + for router in &routers.read().unwrap().routers { + invokers = router.route(invokers, url.clone(), invo.clone()); + } + } + if let Some(routers) = &self.service_routers { + for router in &routers.read().unwrap().routers { + invokers = router.route(invokers, url.clone(), invo.clone()); + } + } + invokers + } +} + +impl ConditionRouter { + pub fn new( + service_routers: Option>>, + application_routers: Option>>, + ) -> Self { + Self { + service_routers, + application_routers, + } + } +} + +#[derive(Debug, Clone, Default)] +pub struct ConditionSingleRouters { + pub routers: Vec, +} + +impl ConditionSingleRouters { + pub fn new(routers: Vec) -> Self { + Self { routers } + } + pub fn is_null(&self) -> bool { + self.routers.is_empty() + } +} diff --git a/dubbo/src/cluster/router/condition/matcher.rs b/dubbo/src/cluster/router/condition/matcher.rs new file mode 100644 index 00000000..92bbe2da --- /dev/null +++ b/dubbo/src/cluster/router/condition/matcher.rs @@ -0,0 +1,78 @@ +use regex::Regex; +use std::{collections::HashSet, error::Error, option::Option}; + +#[derive(Clone, Debug, Default)] +pub struct ConditionMatcher { + _key: String, + matches: HashSet, + mismatches: HashSet, +} + +impl ConditionMatcher { + pub fn new(_key: String) -> Self { + ConditionMatcher { + _key, + matches: HashSet::new(), + mismatches: HashSet::new(), + } + } + + pub fn is_match(&self, value: Option) -> Result> { + match value { + None => Ok(false), + Some(val) => { + for match_ in self.matches.iter() { + if self.do_pattern_match(match_, &val) { + return Ok(true); + } + } + for mismatch in self.mismatches.iter() { + if !self.do_pattern_match(mismatch, &val) { + return Ok(true); + } + } + Ok(false) + } + } + } + + pub fn get_matches(&mut self) -> &mut 
HashSet { + &mut self.matches + } + pub fn get_mismatches(&mut self) -> &mut HashSet { + &mut self.mismatches + } + + fn do_pattern_match(&self, pattern: &str, value: &str) -> bool { + if pattern.contains('*') { + return star_matcher(pattern, value); + } + + if pattern.contains('~') { + let parts: Vec<&str> = pattern.split('~').collect(); + + if parts.len() == 2 { + if let (Ok(left), Ok(right), Ok(val)) = ( + parts[0].parse::(), + parts[1].parse::(), + value.parse::(), + ) { + return range_matcher(val, left, right); + } + } + return false; + } + pattern == value + } +} + +pub fn star_matcher(pattern: &str, input: &str) -> bool { + // 将*替换为任意字符的正则表达式 + let pattern = pattern.replace("*", ".*"); + let regex = Regex::new(&pattern).unwrap(); + regex.is_match(input) +} + +pub fn range_matcher(val: i32, min: i32, max: i32) -> bool { + min <= val && val <= max +} diff --git a/dubbo/src/cluster/router/condition/mod.rs b/dubbo/src/cluster/router/condition/mod.rs new file mode 100644 index 00000000..7285b88f --- /dev/null +++ b/dubbo/src/cluster/router/condition/mod.rs @@ -0,0 +1,3 @@ +pub mod condition_router; +pub mod matcher; +pub mod single_router; diff --git a/dubbo/src/cluster/router/condition/single_router.rs b/dubbo/src/cluster/router/condition/single_router.rs new file mode 100644 index 00000000..5f06aa8f --- /dev/null +++ b/dubbo/src/cluster/router/condition/single_router.rs @@ -0,0 +1,215 @@ +use dubbo_base::Url; +use dubbo_logger::tracing::info; +use regex::Regex; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +use crate::{ + cluster::router::{condition::matcher::ConditionMatcher, utils::to_original_map, Router}, + codegen::RpcInvocation, + invocation::Invocation, +}; + +#[derive(Debug, Clone, Default)] +pub struct ConditionSingleRouter { + pub name: String, + pub when_condition: HashMap>>, + pub then_condition: HashMap>>, + pub enabled: bool, + pub force: bool, +} + +impl Router for ConditionSingleRouter { + fn route(&self, invokers: Vec, url: Url, invocation: Arc) -> Vec { + if !self.enabled { + return invokers; + }; + let mut result = Vec::new(); + if self.match_when(url.clone(), invocation.clone()) { + for invoker in &invokers.clone() { + if self.match_then(invoker.clone(), invocation.clone()) { + result.push(invoker.clone()); + } + } + if result.is_empty() && self.force == false { + invokers + } else { + result + } + } else { + invokers + } + } +} + +impl ConditionSingleRouter { + pub fn new(rule: String, force: bool, enabled: bool) -> Self { + let mut router = Self { + name: "condition".to_string(), + when_condition: HashMap::new(), + then_condition: HashMap::new(), + enabled, + force, + }; + if enabled { + router.init(rule).expect("parse rule error"); + } + router + } + + fn init(&mut self, rule: String) -> Result<(), Box> { + match rule.trim().is_empty() { + true => Err("Illegal route rule!".into()), + false => { + let r = rule.replace("consumer.", "").replace("provider.", ""); + let i = r.find("=>").unwrap_or_else(|| r.len()); + let when_rule = r[..i].trim().to_string(); + let then_rule = r[(i + 2)..].trim().to_string(); + let when = if when_rule.is_empty() || when_rule == "true" { + HashMap::new() + } else { + self.parse_rule(&when_rule)? + }; + let then = if then_rule.is_empty() || then_rule == "false" { + HashMap::new() + } else { + self.parse_rule(&then_rule)? 
+ }; + self.when_condition = when; + self.then_condition = then; + Ok(()) + } + } + } + + fn parse_rule( + &mut self, + rule: &str, + ) -> Result>>, Box> { + let mut conditions: HashMap>> = HashMap::new(); + let mut current_matcher: Option>> = None; + let regex = Regex::new(r"([&!=,]*)\s*([^&!=,\s]+)").unwrap(); + for cap in regex.captures_iter(rule) { + let separator = &cap[1]; + let content = &cap[2]; + + match separator { + "" => { + let current_key = content.to_string(); + current_matcher = + Some(Arc::new(RwLock::new(self.get_matcher(current_key.clone())))); + conditions.insert( + current_key.clone(), + current_matcher.as_ref().unwrap().clone(), + ); + } + "&" => { + let current_key = content.to_string(); + current_matcher = conditions.get(¤t_key).cloned(); + if current_matcher.is_none() { + let matcher = Arc::new(RwLock::new(self.get_matcher(current_key.clone()))); + conditions.insert(current_key.clone(), matcher.clone()); + current_matcher = Some(matcher); + } + } + "=" => { + if let Some(matcher) = ¤t_matcher { + let mut matcher_borrowed = matcher.write().unwrap(); + matcher_borrowed + .get_matches() + .insert(content.to_string().clone()); + } else { + return Err("Error: ...".into()); + } + } + "!=" => { + if let Some(matcher) = ¤t_matcher { + let mut matcher_borrowed = matcher.write().unwrap(); + matcher_borrowed + .get_mismatches() + .insert(content.to_string().clone()); + } else { + return Err("Error: ...".into()); + } + } + "," => { + if let Some(matcher) = ¤t_matcher { + let mut matcher_borrowed = matcher.write().unwrap(); + if matcher_borrowed.get_matches().is_empty() + && matcher_borrowed.get_mismatches().is_empty() + { + return Err("Error: ...".into()); + } + drop(matcher_borrowed); + let mut matcher_borrowed_mut = matcher.write().unwrap(); + matcher_borrowed_mut + .get_matches() + .insert(content.to_string().clone()); + } else { + return Err("Error: ...".into()); + } + } + _ => { + return Err("Error: ...".into()); + } + } + } + Ok(conditions) + } + + // pub fn is_runtime(&self) -> bool { + // // same as the Java version + // } + + pub fn get_matcher(&self, key: String) -> ConditionMatcher { + ConditionMatcher::new(key) + } + + pub fn match_when(&self, url: Url, invocation: Arc) -> bool { + if self.when_condition.is_empty() { + return true; + } + self.do_match(url, &self.when_condition, invocation) + } + + pub fn match_then(&self, url: Url, invocation: Arc) -> bool { + if self.then_condition.is_empty() { + return false; + } + self.do_match(url, &self.then_condition, invocation) + } + + pub fn do_match( + &self, + url: Url, + conditions: &HashMap>>, + invocation: Arc, + ) -> bool { + let sample: HashMap = to_original_map(url); + conditions.iter().all(|(key, condition_matcher)| { + let matcher = condition_matcher.read().unwrap(); + let value = get_value(key, &sample, invocation.clone()); + match matcher.is_match(value) { + Ok(result) => result, + Err(error) => { + info!("Error occurred: {:?}", error); + false + } + } + }) + } +} + +fn get_value( + key: &String, + sample: &HashMap, + invocation: Arc, +) -> Option { + if key == "method" { + let method_param = invocation.get_method_name(); + return Some(method_param); + } + sample.get(key).cloned() +} diff --git a/dubbo/src/cluster/router/manager/condition_manager.rs b/dubbo/src/cluster/router/manager/condition_manager.rs new file mode 100644 index 00000000..7ad5e1b6 --- /dev/null +++ b/dubbo/src/cluster/router/manager/condition_manager.rs @@ -0,0 +1,72 @@ +use crate::cluster::router::condition::{ + 
condition_router::{ConditionRouter, ConditionSingleRouters}, + single_router::ConditionSingleRouter, +}; +use dubbo_config::router::ConditionRouterConfig; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +#[derive(Debug, Clone, Default)] +pub struct ConditionRouterManager { + //Application-level routing applies globally, while service-level routing only affects a specific service. + pub routers_service: HashMap>>, + pub routers_application: Arc>, +} + +impl ConditionRouterManager { + pub fn get_router(&self, service_name: &String) -> Option { + let routers_application_is_null = self.routers_application.read().unwrap().is_null(); + self.routers_service + .get(service_name) + .map(|routers_service| { + ConditionRouter::new( + Some(routers_service.clone()), + if routers_application_is_null { + None + } else { + Some(self.routers_application.clone()) + }, + ) + }) + .or_else(|| { + if routers_application_is_null { + None + } else { + Some(ConditionRouter::new( + None, + Some(self.routers_application.clone()), + )) + } + }) + } + + pub fn update(&mut self, config: ConditionRouterConfig) { + let force = config.force; + let scope = config.scope; + let key = config.key; + let enable = config.enabled; + + let routers = config + .conditions + .into_iter() + .map(|condition| ConditionSingleRouter::new(condition, force, enable)) + .collect::>(); + + match scope.as_str() { + "application" => { + self.routers_application.write().unwrap().routers = routers; + } + "service" => { + self.routers_service + .entry(key) + .or_insert_with(|| Arc::new(RwLock::new(ConditionSingleRouters::new(vec![])))) + .write() + .unwrap() + .routers = routers; + } + _ => {} + } + } +} diff --git a/dubbo/src/cluster/router/manager/mod.rs b/dubbo/src/cluster/router/manager/mod.rs new file mode 100644 index 00000000..025f6c16 --- /dev/null +++ b/dubbo/src/cluster/router/manager/mod.rs @@ -0,0 +1,3 @@ +mod condition_manager; +pub mod router_manager; +mod tag_manager; diff --git a/dubbo/src/cluster/router/manager/router_manager.rs b/dubbo/src/cluster/router/manager/router_manager.rs new file mode 100644 index 00000000..e963181e --- /dev/null +++ b/dubbo/src/cluster/router/manager/router_manager.rs @@ -0,0 +1,152 @@ +use crate::cluster::router::{ + manager::{condition_manager::ConditionRouterManager, tag_manager::TagRouterManager}, + nacos_config_center::nacos_client::NacosClient, + router_chain::RouterChain, +}; +use dubbo_base::Url; +use dubbo_config::{ + get_global_config, + router::{ConditionRouterConfig, NacosConfig, TagRouterConfig}, +}; +use dubbo_logger::tracing::{info, trace}; +use once_cell::sync::OnceCell; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +pub static GLOBAL_ROUTER_MANAGER: OnceCell>> = OnceCell::new(); +const TAG: &str = "tag"; +const CONDITION: &str = "condition"; +pub struct RouterManager { + pub condition_router_manager: ConditionRouterManager, + pub tag_router_manager: TagRouterManager, + pub nacos: Option, + pub consumer: HashMap, +} + +impl RouterManager { + pub fn get_router_chain(&self, service: String) -> RouterChain { + let mut chain = RouterChain::new(); + if let Some(url) = self.consumer.get(service.as_str()) { + if let Some(tag_router) = self.tag_router_manager.get_router(&service) { + chain.add_router(TAG.to_string(), Box::new(tag_router)); + } + if let Some(condition_router) = self.condition_router_manager.get_router(&service) { + chain.add_router(CONDITION.to_string(), Box::new(condition_router)); + } + chain.self_url = url.clone(); + } + chain + } + + pub fn 
notify(&mut self, event: RouterConfigChangeEvent) { + match event.router_kind.as_str() { + CONDITION => { + let config: ConditionRouterConfig = + serde_yaml::from_str(event.content.as_str()).unwrap(); + self.condition_router_manager.update(config) + } + TAG => { + let config: TagRouterConfig = serde_yaml::from_str(event.content.as_str()).unwrap(); + self.tag_router_manager.update(config) + } + _ => { + info!("other router change event") + } + } + } + + pub fn init_nacos(&mut self, config: NacosConfig) { + self.nacos = Some(NacosClient::new_init_client(config)); + self.init_router_managers_for_nacos(); + } + + fn init_router_managers_for_nacos(&mut self) { + if let Some(tag_config) = self + .nacos + .as_ref() + .and_then(|n| n.get_config("application", TAG, TAG)) + { + self.tag_router_manager.update(tag_config); + } + + if let Some(condition_app_config) = self + .nacos + .as_ref() + .and_then(|n| n.get_config("application", CONDITION, TAG)) + { + self.condition_router_manager.update(condition_app_config); + } + + for (service_name, _) in &self.consumer { + if let Some(condition_config) = self + .nacos + .as_ref() + .and_then(|n| n.get_config(service_name, CONDITION, CONDITION)) + { + self.condition_router_manager.update(condition_config); + } + } + } + + pub fn init(&mut self) { + let config = get_global_config().routers.clone(); + self.init_consumer_configs(); + if let Some(nacos_config) = &config.nacos { + self.init_nacos(nacos_config.clone()); + } else { + trace!("Nacos not configured, using local YAML configuration for routing"); + if let Some(condition_configs) = &config.conditions { + for condition_config in condition_configs { + self.condition_router_manager + .update(condition_config.clone()); + } + } else { + info!("Unconfigured Condition Router") + } + if let Some(tag_config) = &config.tags { + self.tag_router_manager.update(tag_config.clone()); + } else { + info!("Unconfigured Tag Router") + } + } + } + + fn init_consumer_configs(&mut self) { + let consumer_configs = get_global_config() + .routers + .consumer + .clone() + .unwrap_or_else(Vec::new); + + for consumer_config in consumer_configs { + let service_url = Url::from_url( + format!("{}/{}", consumer_config.url, consumer_config.service).as_str(), + ) + .expect("Consumer config error"); + + self.consumer.insert(consumer_config.service, service_url); + } + } +} + +pub fn get_global_router_manager() -> &'static Arc> { + GLOBAL_ROUTER_MANAGER.get_or_init(|| { + let mut router_manager = RouterManager { + condition_router_manager: ConditionRouterManager::default(), + tag_router_manager: TagRouterManager::default(), + nacos: None, + consumer: HashMap::new(), + }; + router_manager.init(); + return Arc::new(RwLock::new(router_manager)); + }) +} + +#[derive(Debug, Default, Clone)] +pub struct RouterConfigChangeEvent { + pub service_name: String, + pub router_kind: String, + pub content: String, +} diff --git a/dubbo/src/cluster/router/manager/tag_manager.rs b/dubbo/src/cluster/router/manager/tag_manager.rs new file mode 100644 index 00000000..8dc24999 --- /dev/null +++ b/dubbo/src/cluster/router/manager/tag_manager.rs @@ -0,0 +1,20 @@ +use crate::cluster::router::tag::tag_router::{TagRouter, TagRouterInner}; +use dubbo_config::router::TagRouterConfig; +use std::sync::{Arc, RwLock}; + +#[derive(Debug, Clone, Default)] +pub struct TagRouterManager { + pub tag_router: Arc>, +} + +impl TagRouterManager { + pub fn get_router(&self, _service_name: &String) -> Option { + Some(TagRouter { + inner: self.tag_router.clone(), + }) + } + + pub fn 
update(&mut self, config: TagRouterConfig) { + self.tag_router.write().unwrap().parse_config(config); + } +} diff --git a/dubbo/src/cluster/router/mod.rs b/dubbo/src/cluster/router/mod.rs new file mode 100644 index 00000000..17c9aec2 --- /dev/null +++ b/dubbo/src/cluster/router/mod.rs @@ -0,0 +1,25 @@ +pub mod condition; +pub mod manager; +pub mod nacos_config_center; +pub mod router_chain; +pub mod tag; +pub mod utils; + +use crate::invocation::RpcInvocation; +use dubbo_base::Url; +use std::{fmt::Debug, sync::Arc}; + +pub trait Router: Debug { + fn route(&self, invokers: Vec, url: Url, invocation: Arc) -> Vec; +} + +pub type BoxRouter = Box; + +#[derive(Debug, Default, Clone)] +pub struct MockRouter {} + +impl Router for MockRouter { + fn route(&self, invokers: Vec, _url: Url, _invocation: Arc) -> Vec { + invokers + } +} diff --git a/dubbo/src/cluster/router/nacos_config_center/mod.rs b/dubbo/src/cluster/router/nacos_config_center/mod.rs new file mode 100644 index 00000000..7878fa9f --- /dev/null +++ b/dubbo/src/cluster/router/nacos_config_center/mod.rs @@ -0,0 +1 @@ +pub mod nacos_client; diff --git a/dubbo/src/cluster/router/nacos_config_center/nacos_client.rs b/dubbo/src/cluster/router/nacos_config_center/nacos_client.rs new file mode 100644 index 00000000..ce72641a --- /dev/null +++ b/dubbo/src/cluster/router/nacos_config_center/nacos_client.rs @@ -0,0 +1,126 @@ +use crate::cluster::router::manager::router_manager::{ + get_global_router_manager, RouterConfigChangeEvent, +}; +use dubbo_config::router::NacosConfig; +use dubbo_logger::{tracing, tracing::info}; +use nacos_sdk::api::{ + config::{ConfigChangeListener, ConfigResponse, ConfigService, ConfigServiceBuilder}, + props::ClientProps, +}; +use std::sync::{Arc, RwLock}; + +pub struct NacosClient { + pub client: Arc>, +} + +unsafe impl Send for NacosClient {} + +unsafe impl Sync for NacosClient {} + +pub struct ConfigChangeListenerImpl; + +impl NacosClient { + pub fn new_init_client(config: NacosConfig) -> Self { + let server_addr = config.addr; + let namespace = config.namespace; + let app = config.app; + let enable_auth = config.enable_auth; + + let mut props = ClientProps::new() + .server_addr(server_addr) + .namespace(namespace) + .app_name(app); + + if enable_auth.is_some() { + info!("enable nacos auth!"); + } else { + info!("disable nacos auth!"); + } + + if let Some(auth) = enable_auth { + props = props + .auth_username(auth.auth_username) + .auth_password(auth.auth_password); + } + + let client = Arc::new(RwLock::new( + ConfigServiceBuilder::new(props) + .build() + .expect("NacosClient build failed! 
Please check NacosConfig"), + )); + + Self { client } + } + + pub fn get_config(&self, data_id: &str, group: &str, config_type: &str) -> Option + where + T: serde::de::DeserializeOwned, + { + let config_resp = self + .client + .read() + .unwrap() + .get_config(data_id.to_string(), group.to_string()); + + match config_resp { + Ok(config_resp) => { + self.add_listener(data_id, group); + let string = config_resp.content(); + let result = serde_yaml::from_str(string); + + match result { + Ok(config) => { + info!( + "success to get {}Router config and parse success", + config_type + ); + Some(config) + } + Err(_) => { + info!("failed to parse {}Router rule", config_type); + None + } + } + } + Err(_) => None, + } + } + + pub fn add_listener(&self, data_id: &str, group: &str) { + if let Err(err) = self + .client + .write() + .map_err(|e| format!("failed to create nacos config listener: {}", e)) + .and_then(|client| { + client + .add_listener( + data_id.to_string(), + group.to_string(), + Arc::new(ConfigChangeListenerImpl {}), + ) + .map_err(|e| format!("failed to add nacos config listener: {}", e)) + }) + { + tracing::error!("{}", err); + } else { + info!("listening the config success"); + } + } +} + +impl ConfigChangeListener for ConfigChangeListenerImpl { + fn notify(&self, config_resp: ConfigResponse) { + let content_type = config_resp.content_type(); + let event = RouterConfigChangeEvent { + service_name: config_resp.data_id().to_string(), + router_kind: config_resp.group().to_string(), + content: config_resp.content().to_string(), + }; + + if content_type == "yaml" { + get_global_router_manager().write().unwrap().notify(event); + } + + info!("notify config: {:?}", config_resp); + } +} diff --git a/dubbo/src/cluster/router/router_chain.rs b/dubbo/src/cluster/router/router_chain.rs new file mode 100644 index 00000000..42d5826f --- /dev/null +++ b/dubbo/src/cluster/router/router_chain.rs @@ -0,0 +1,56 @@ +use crate::{cluster::router::BoxRouter, invocation::RpcInvocation}; +use dubbo_base::Url; +use std::{collections::HashMap, sync::Arc}; + +#[derive(Debug, Default)] +pub struct RouterChain { + pub routers: HashMap, + pub self_url: Url, +} + +impl RouterChain { + pub fn new() -> Self { + RouterChain { + routers: HashMap::new(), + self_url: Url::new(), + } + } + + pub fn route(&self, mut invokers: Vec, invocation: Arc) -> Vec { + for (_, value) in self.routers.iter() { + invokers = value.route(invokers, self.self_url.clone(), invocation.clone()) + } + invokers + } + + pub fn add_router(&mut self, key: String, router: BoxRouter) { + self.routers.insert(key, router); + } +} + +#[test] +fn test() { + use crate::cluster::router::manager::router_manager::get_global_router_manager; + + let u1 = Url::from_url("tri://127.0.0.1:8888/org.apache.dubbo.sample.tri.Greeter").unwrap(); + let u2 = Url::from_url("tri://127.0.0.1:8889/org.apache.dubbo.sample.tri.Greeter").unwrap(); + let u3 = Url::from_url("tri://127.0.0.1:8800/org.apache.dubbo.sample.tri.Greeter").unwrap(); + let u4 = Url::from_url("tri://127.0.2.1:8880/org.apache.dubbo.sample.tri.Greeter").unwrap(); + let u5 = Url::from_url("tri://127.0.1.1:8882/org.apache.dubbo.sample.tri.Greeter").unwrap(); + let u6 = Url::from_url("tri://213.0.1.1:8888/org.apache.dubbo.sample.tri.Greeter").unwrap(); + let u7 = Url::from_url("tri://169.0.1.1:8887/org.apache.dubbo.sample.tri.Greeter").unwrap(); + let invs = vec![u1, u2, u3, u4, u5, u6, u7]; + let len = invs.len().clone(); + let inv = Arc::new( + RpcInvocation::default() + .with_method_name("greet".to_string()) 
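+            // the unique name set on the next line must match a `consumer` entry in the routing
+            // config; otherwise get_router_chain() below returns an empty RouterChain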
+ .with_service_unique_name("org.apache.dubbo.sample.tri.Greeter".to_string()), + ); + let x = get_global_router_manager() + .read() + .unwrap() + .get_router_chain(inv.get_target_service_unique_name()); + let result = x.route(invs, inv.clone()); + println!("total:{},result:{}", len, result.len().clone()); + dbg!(result); +} diff --git a/dubbo/src/cluster/router/tag/mod.rs b/dubbo/src/cluster/router/tag/mod.rs new file mode 100644 index 00000000..6ac5b218 --- /dev/null +++ b/dubbo/src/cluster/router/tag/mod.rs @@ -0,0 +1 @@ +pub mod tag_router; diff --git a/dubbo/src/cluster/router/tag/tag_router.rs b/dubbo/src/cluster/router/tag/tag_router.rs new file mode 100644 index 00000000..7a83ea57 --- /dev/null +++ b/dubbo/src/cluster/router/tag/tag_router.rs @@ -0,0 +1,86 @@ +use crate::{ + cluster::router::{utils::to_original_map, Router}, + codegen::RpcInvocation, +}; +use dubbo_base::Url; +use dubbo_config::router::TagRouterConfig; +use std::{ + collections::HashMap, + fmt::Debug, + sync::{Arc, RwLock}, +}; + +#[derive(Debug, Clone, Default)] +pub struct TagRouterInner { + pub tag_rules: HashMap>, + pub force: bool, + pub enabled: bool, +} + +#[derive(Debug, Clone, Default)] +pub struct TagRouter { + pub(crate) inner: Arc>, +} +impl Router for TagRouter { + fn route(&self, invokers: Vec, url: Url, invocation: Arc) -> Vec { + return self.inner.read().unwrap().route(invokers, url, invocation); + } +} + +impl TagRouterInner { + pub fn parse_config(&mut self, config: TagRouterConfig) { + self.tag_rules = HashMap::new(); + self.force = config.force; + self.enabled = config.enabled; + for tag in &config.tags { + let mut tags = HashMap::new(); + for rule in &tag.matches { + tags.insert(rule.key.clone(), rule.value.clone()); + } + self.tag_rules.insert(tag.name.clone(), tags); + } + } + + pub fn match_tag(&self, params: HashMap) -> Option { + let mut tag_result = None; + for (tag, tag_rules) in &self.tag_rules { + for (key, value) in tag_rules { + match params.get(key.as_str()) { + None => {} + Some(val) => { + if val == value { + tag_result = Some(tag.clone()) + } + } + } + } + } + tag_result + } + + pub fn route(&self, invokers: Vec, url: Url, _invocation: Arc) -> Vec { + if !self.enabled { + return invokers; + }; + let self_param = to_original_map(url); + let invocation_tag = self.match_tag(self_param); + let mut invokers_result = Vec::new(); + let mut invokers_no_tag = Vec::new(); + for invoker in &invokers { + let invoker_param = to_original_map(invoker.clone()); + let invoker_tag = self.match_tag(invoker_param); + if invoker_tag == None { + invokers_no_tag.push(invoker.clone()); + } + if invoker_tag == invocation_tag { + invokers_result.push(invoker.clone()); + } + } + if invokers_result.is_empty() { + if !self.force { + return invokers_no_tag; + } + } + invokers_result + } +} diff --git a/dubbo/src/cluster/router/utils.rs b/dubbo/src/cluster/router/utils.rs new file mode 100644 index 00000000..2ca50fcc --- /dev/null +++ b/dubbo/src/cluster/router/utils.rs @@ -0,0 +1,16 @@ +use dubbo_base::Url; +use std::{collections::HashMap, string::String}; + +pub fn to_original_map(url: Url) -> HashMap { + let mut result: HashMap = HashMap::new(); + result.insert("scheme".parse().unwrap(), url.scheme); + result.insert("location".parse().unwrap(), url.location); + result.insert("ip".parse().unwrap(), url.ip); + result.insert("port".parse().unwrap(), url.port); + result.insert("service_name".parse().unwrap(), url.service_name); + result.insert("service_key".parse().unwrap(), url.service_key); + for (key, value) 
in url.params { + result.insert(key, value); + } + result +} diff --git a/dubbo/src/codegen.rs b/dubbo/src/codegen.rs index 8d95c21b..452f560d 100644 --- a/dubbo/src/codegen.rs +++ b/dubbo/src/codegen.rs @@ -27,14 +27,12 @@ pub use hyper::Body as hyperBody; pub use tower_service::Service; pub use super::{ - cluster::directory::RegistryDirectory, empty_body, invocation::{IntoStreamingRequest, Request, Response, RpcInvocation}, protocol::{triple::triple_invoker::TripleInvoker, Invoker}, - registry::{BoxRegistry, Registry}, triple::{ client::TripleClient, - codec::{prost::ProstCodec, serde_codec::SerdeCodec, Codec}, + codec::{prost::ProstCodec, Codec}, decode::Decoding, server::{ service::{ClientStreamingSvc, ServerStreamingSvc, StreamingSvc, UnarySvc}, @@ -46,8 +44,7 @@ pub use super::{ pub use crate::{ filter::{service::FilterService, Filter}, triple::{ - client::builder::{ClientBoxService, ClientBuilder}, - server::builder::ServerBuilder, + client::builder::ClientBuilder, server::builder::ServerBuilder, transport::connection::Connection, }, }; diff --git a/dubbo/src/directory/mod.rs b/dubbo/src/directory/mod.rs new file mode 100644 index 00000000..f0d9ebe2 --- /dev/null +++ b/dubbo/src/directory/mod.rs @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::{ + collections::HashMap, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll}, +}; + +use crate::{ + codegen::{RpcInvocation, TripleInvoker}, + invocation::Invocation, + invoker::{clone_invoker::CloneInvoker, NewInvoker}, + param::Param, + registry::n_registry::Registry, + svc::NewService, + StdError, +}; +use dubbo_base::Url; +use dubbo_logger::tracing::debug; +use futures_core::ready; +use futures_util::future; +use tokio::sync::mpsc::channel; +use tokio_stream::wrappers::ReceiverStream; +use tower::{ + buffer::Buffer, + discover::{Change, Discover}, +}; + +use tower_service::Service; + +type BufferedDirectory = + Buffer, StdError>>>, ()>; + +pub struct NewCachedDirectory +where + N: Registry + Clone + Send + Sync + 'static, +{ + inner: CachedDirectory, RpcInvocation>, +} + +pub struct CachedDirectory +where + // NewDirectory + N: NewService, +{ + inner: N, + cache: Arc>>, +} + +pub struct NewDirectory { + // registry + inner: N, +} + +pub struct Directory { + directory: HashMap>, + discover: D, + new_invoker: NewInvoker, +} + +impl NewCachedDirectory +where + N: Registry + Clone + Send + Sync + 'static, +{ + pub fn layer() -> impl tower_layer::Layer { + tower_layer::layer_fn(|inner: N| { + NewCachedDirectory { + // inner is registry + inner: CachedDirectory::new(NewDirectory::new(inner)), + } + }) + } +} + +impl NewService for NewCachedDirectory +where + T: Param, + // service registry + N: Registry + Clone + Send + Sync + 'static, +{ + type Service = BufferedDirectory; + + fn new_service(&self, target: T) -> Self::Service { + self.inner.new_service(target.param()) + } +} + +impl CachedDirectory +where + N: NewService, +{ + pub fn new(inner: N) -> Self { + CachedDirectory { + inner, + cache: Default::default(), + } + } +} + +impl NewService for CachedDirectory +where + T: Param, + // NewDirectory + N: NewService, + // Buffered directory + N::Service: Clone, +{ + type Service = N::Service; + + fn new_service(&self, target: T) -> Self::Service { + let rpc_invocation = target.param(); + let service_name = rpc_invocation.get_target_service_unique_name(); + let mut cache = self.cache.lock().expect("cached directory lock failed."); + let value = cache.get(&service_name).map(|val| val.clone()); + match value { + None => { + let new_service = self.inner.new_service(target); + cache.insert(service_name, new_service.clone()); + new_service + } + Some(value) => value, + } + } +} + +impl NewDirectory { + const MAX_DIRECTORY_BUFFER_SIZE: usize = 16; + + pub fn new(inner: N) -> Self { + NewDirectory { inner } + } +} + +impl NewService for NewDirectory +where + T: Param, + // service registry + N: Registry + Clone + Send + Sync + 'static, +{ + type Service = BufferedDirectory; + + fn new_service(&self, target: T) -> Self::Service { + let service_name = target.param().get_target_service_unique_name(); + + let registry = self.inner.clone(); + + let (tx, rx) = channel(Self::MAX_DIRECTORY_BUFFER_SIZE); + + tokio::spawn(async move { + // todo use dubbo url model generate subscribe url + // category:serviceInterface:version:group + let consumer_url = format!("consumer://{}/{}", "127.0.0.1:8888", service_name); + let subscribe_url = Url::from_url(&consumer_url).unwrap(); + let receiver = registry.subscribe(subscribe_url).await; + debug!("discover start!"); + match receiver { + Err(_e) => { + // error!("discover stream error: {}", e); + debug!("discover stream error"); + } + Ok(mut receiver) => loop { + let change = receiver.recv().await; + debug!("receive change: {:?}", change); + 
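+                        // a `None` change means the discovery stream has ended; any other change is
+                        // forwarded through the channel that feeds the Directory service below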
match change { + None => { + debug!("discover stream closed."); + break; + } + Some(change) => { + let _ = tx.send(change).await; + } + } + }, + } + }); + + Buffer::new( + Directory::new(ReceiverStream::new(rx)), + Self::MAX_DIRECTORY_BUFFER_SIZE, + ) + } +} + +impl Directory { + pub fn new(discover: D) -> Self { + Directory { + directory: Default::default(), + discover, + new_invoker: NewInvoker, + } + } +} + +impl Service<()> for Directory +where + // Discover + D: Discover + Unpin + Send, + D::Error: Into, +{ + type Response = Vec>; + + type Error = StdError; + + type Future = future::Ready>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + loop { + let pin_discover = Pin::new(&mut self.discover); + + match pin_discover.poll_discover(cx) { + Poll::Pending => { + if self.directory.is_empty() { + return Poll::Pending; + } else { + return Poll::Ready(Ok(())); + } + } + Poll::Ready(change) => { + let change = change.transpose().map_err(|e| e.into())?; + match change { + Some(Change::Remove(key)) => { + debug!("remove key: {}", key); + self.directory.remove(&key); + } + Some(Change::Insert(key, _)) => { + debug!("insert key: {}", key); + let invoker = self.new_invoker.new_service(key.clone()); + self.directory.insert(key, invoker); + } + None => { + debug!("stream closed"); + return Poll::Ready(Ok(())); + } + } + } + } + } + } + + fn call(&mut self, _: ()) -> Self::Future { + let vec = self + .directory + .values() + .map(|val| val.clone()) + .collect::>>(); + future::ok(vec) + } +} diff --git a/dubbo/src/framework.rs b/dubbo/src/framework.rs index d595f38a..f1d8c23e 100644 --- a/dubbo/src/framework.rs +++ b/dubbo/src/framework.rs @@ -22,13 +22,15 @@ use std::{ sync::{Arc, Mutex}, }; +use crate::triple::server::support::RpcServer; use crate::{ protocol::{BoxExporter, Protocol}, registry::{ + n_registry::{ArcRegistry, Registry}, protocol::RegistryProtocol, types::{Registries, RegistriesOperation}, - BoxRegistry, Registry, }, + triple::server::support::RpcHttp2Server, }; use dubbo_base::Url; use dubbo_config::{get_global_config, protocol::ProtocolRetrieve, RootConfig}; @@ -60,14 +62,28 @@ impl Dubbo { self } - pub fn add_registry(mut self, registry_key: &str, registry: BoxRegistry) -> Self { + pub fn add_registry(mut self, registry_key: &str, registry: ArcRegistry) -> Self { if self.registries.is_none() { self.registries = Some(Arc::new(Mutex::new(HashMap::new()))); } self.registries .as_ref() .unwrap() - .insert(registry_key.to_string(), Arc::new(Mutex::new(registry))); + .insert(registry_key.to_string(), registry); + self + } + + pub fn register_server(self, server: T) -> Self { + let info = server.get_info(); + let server_name = info.0.to_owned() + "." 
+ info.1; + let s: RpcHttp2Server = RpcHttp2Server::new(server); + crate::protocol::triple::TRIPLE_SERVICES + .write() + .unwrap() + .insert( + server_name, + crate::utils::boxed_clone::BoxCloneService::new(s), + ); self } @@ -88,8 +104,12 @@ impl Dubbo { let protocol = root_config .protocols .get_protocol_or_default(service_config.protocol.as_str()); - let protocol_url = - format!("{}/{}", protocol.to_url(), service_config.interface.clone(),); + let mut protocol_url = + format!("{}/{}", protocol.to_url(), service_config.interface.clone()); + if let Some(serialization) = &service_config.serialization { + protocol_url.push_str(&format!("?serialization={}", serialization)); + } + tracing::info!("protocol_url: {:?}", protocol_url); Url::from_url(&protocol_url) } else { @@ -130,12 +150,13 @@ impl Dubbo { async_vec.push(exporter); //TODO multiple registry if self.registries.is_some() { - self.registries + let _ = self + .registries .as_ref() .unwrap() .default_registry() .register(url.clone()) - .unwrap(); + .await; } } } diff --git a/dubbo/src/invocation.rs b/dubbo/src/invocation.rs index bdd152be..9a70d795 100644 --- a/dubbo/src/invocation.rs +++ b/dubbo/src/invocation.rs @@ -79,6 +79,7 @@ impl Request { } } +#[derive(Debug)] pub struct Response { message: T, metadata: Metadata, @@ -196,7 +197,7 @@ pub trait Invocation { fn get_method_name(&self) -> String; } -#[derive(Default)] +#[derive(Default, Clone)] pub struct RpcInvocation { target_service_unique_name: String, method_name: String, diff --git a/dubbo/src/invoker/clone_body.rs b/dubbo/src/invoker/clone_body.rs new file mode 100644 index 00000000..4de8f899 --- /dev/null +++ b/dubbo/src/invoker/clone_body.rs @@ -0,0 +1,350 @@ +use std::{ + collections::VecDeque, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll}, +}; + +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use futures_core::ready; + +use http::HeaderMap; +use http_body::Body; +use pin_project::pin_project; +use thiserror::Error; + +use crate::StdError; + +#[derive(Error, Debug)] +#[error("buffered body reach max capacity.")] +pub struct ReachMaxCapacityError; + +pub struct BufferedBody { + shared: Arc>>, + owned: Option, + replay_body: bool, + replay_trailers: bool, + is_empty: bool, + size_hint: http_body::SizeHint, +} + +pub struct OwnedBufferedBody { + body: hyper::Body, + trailers: Option, + buf: InnerBuffer, +} + +impl BufferedBody { + pub fn new(body: hyper::Body, buf_size: usize) -> Self { + let size_hint = body.size_hint(); + let is_empty = body.is_end_stream(); + BufferedBody { + shared: Default::default(), + owned: Some(OwnedBufferedBody { + body, + trailers: None, + buf: InnerBuffer { + bufs: Default::default(), + capacity: buf_size, + }, + }), + replay_body: false, + replay_trailers: false, + is_empty, + size_hint, + } + } +} + +impl Clone for BufferedBody { + fn clone(&self) -> Self { + Self { + shared: self.shared.clone(), + owned: None, + replay_body: true, + replay_trailers: true, + is_empty: self.is_empty, + size_hint: self.size_hint.clone(), + } + } +} + +impl Drop for BufferedBody { + fn drop(&mut self) { + if let Some(owned) = self.owned.take() { + let lock = self.shared.lock(); + if let Ok(mut lock) = lock { + *lock = Some(owned); + } + } + } +} + +impl Body for BufferedBody { + type Data = BytesData; + type Error = StdError; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let mut_self = self.get_mut(); + + let owned_body = mut_self.owned.get_or_insert_with(|| { + let lock = mut_self.shared.lock(); + if let Err(e) = 
lock { + panic!("buffered body get shared data lock failed. {}", e); + } + let mut data = lock.unwrap(); + + data.take().expect("cannot get shared buffered body.") + }); + + if mut_self.replay_body { + mut_self.replay_body = false; + if owned_body.buf.has_remaining() { + return Poll::Ready(Some(Ok(BytesData::BufferedBytes(owned_body.buf.clone())))); + } + + if owned_body.buf.is_capped() { + return Poll::Ready(Some(Err(ReachMaxCapacityError.into()))); + } + } + + if mut_self.is_empty { + return Poll::Ready(None); + } + + let mut data = { + let pin = Pin::new(&mut owned_body.body); + let data = ready!(pin.poll_data(cx)); + match data { + Some(Ok(data)) => data, + Some(Err(e)) => return Poll::Ready(Some(Err(e.into()))), + None => { + mut_self.is_empty = true; + return Poll::Ready(None); + } + } + }; + + let len = data.remaining(); + + owned_body.buf.capacity = owned_body.buf.capacity.saturating_sub(len); + + let data = if owned_body.buf.is_capped() { + if owned_body.buf.has_remaining() { + owned_body.buf.bufs = VecDeque::default(); + } + data.copy_to_bytes(len) + } else { + owned_body.buf.push_bytes(data.copy_to_bytes(len)) + }; + + Poll::Ready(Some(Ok(BytesData::OriginBytes(data)))) + } + + fn poll_trailers( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + let mut_self = self.get_mut(); + let owned_body = mut_self.owned.get_or_insert_with(|| { + let lock = mut_self.shared.lock(); + if let Err(e) = lock { + panic!("buffered body get shared data lock failed. {}", e); + } + let mut data = lock.unwrap(); + + data.take().expect("cannot get shared buffered body.") + }); + + if mut_self.replay_trailers { + mut_self.replay_trailers = false; + if let Some(ref trailers) = owned_body.trailers { + return Poll::Ready(Ok(Some(trailers.clone()))); + } + } + + let mut_body = &mut owned_body.body; + if !mut_body.is_end_stream() { + let trailers = ready!(Pin::new(mut_body).poll_trailers(cx)).map(|trailers| { + owned_body.trailers = trailers.clone(); + trailers + }); + return Poll::Ready(trailers.map_err(|e| e.into())); + } + + Poll::Ready(Ok(None)) + } + + fn is_end_stream(&self) -> bool { + if self.is_empty { + return true; + } + + let is_end = self + .owned + .as_ref() + .map(|owned| owned.body.is_end_stream()) + .unwrap_or(false); + + !self.replay_body && !self.replay_trailers && is_end + } + + fn size_hint(&self) -> http_body::SizeHint { + self.size_hint.clone() + } +} + +#[derive(Clone)] +pub struct InnerBuffer { + bufs: VecDeque, + capacity: usize, +} + +impl InnerBuffer { + pub fn push_bytes(&mut self, bytes: Bytes) -> Bytes { + self.bufs.push_back(bytes.clone()); + bytes + } + + pub fn is_capped(&self) -> bool { + self.capacity == 0 + } +} + +impl Buf for InnerBuffer { + fn remaining(&self) -> usize { + self.bufs.iter().map(|bytes| bytes.remaining()).sum() + } + + fn chunk(&self) -> &[u8] { + self.bufs.front().map(Buf::chunk).unwrap_or(&[]) + } + + fn chunks_vectored<'a>(&'a self, dst: &mut [std::io::IoSlice<'a>]) -> usize { + if dst.is_empty() { + return 0; + } + + let mut filled = 0; + + for bytes in self.bufs.iter() { + filled += bytes.chunks_vectored(&mut dst[filled..]) + } + + filled + } + + fn advance(&mut self, mut cnt: usize) { + while cnt > 0 { + let first = self.bufs.front_mut(); + if first.is_none() { + break; + } + let first = first.unwrap(); + let first_remaining = first.remaining(); + if first_remaining > cnt { + first.advance(cnt); + break; + } + + first.advance(first_remaining); + cnt = cnt - first_remaining; + self.bufs.pop_front(); + } + } + + fn 
copy_to_bytes(&mut self, len: usize) -> bytes::Bytes { + match self.bufs.front_mut() { + Some(buf) if len <= buf.remaining() => { + let bytes = buf.copy_to_bytes(len); + if buf.remaining() == 0 { + self.bufs.pop_front(); + } + bytes + } + _ => { + let mut bytes = BytesMut::with_capacity(len); + bytes.put(self.take(len)); + bytes.freeze() + } + } + } +} + +pub enum BytesData { + BufferedBytes(InnerBuffer), + OriginBytes(Bytes), +} + +impl Buf for BytesData { + fn remaining(&self) -> usize { + match self { + BytesData::BufferedBytes(bytes) => bytes.remaining(), + BytesData::OriginBytes(bytes) => bytes.remaining(), + } + } + + fn chunk(&self) -> &[u8] { + match self { + BytesData::BufferedBytes(bytes) => bytes.chunk(), + BytesData::OriginBytes(bytes) => bytes.chunk(), + } + } + + fn advance(&mut self, cnt: usize) { + match self { + BytesData::BufferedBytes(bytes) => bytes.advance(cnt), + BytesData::OriginBytes(bytes) => bytes.advance(cnt), + } + } + + fn copy_to_bytes(&mut self, len: usize) -> bytes::Bytes { + match self { + BytesData::BufferedBytes(bytes) => bytes.copy_to_bytes(len), + BytesData::OriginBytes(bytes) => bytes.copy_to_bytes(len), + } + } +} + +#[pin_project] +pub struct CloneBody(#[pin] BufferedBody); + +impl CloneBody { + pub fn new(inner_body: hyper::Body) -> Self { + let inner_body = BufferedBody::new(inner_body, 1024 * 64); + CloneBody(inner_body) + } +} + +impl Body for CloneBody { + type Data = BytesData; + + type Error = StdError; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + self.project().0.poll_data(cx) + } + + fn poll_trailers( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + self.project().0.poll_trailers(cx) + } + + fn size_hint(&self) -> http_body::SizeHint { + self.0.size_hint() + } +} + +impl Clone for CloneBody { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} diff --git a/dubbo/src/invoker/clone_invoker.rs b/dubbo/src/invoker/clone_invoker.rs new file mode 100644 index 00000000..c1fa00d8 --- /dev/null +++ b/dubbo/src/invoker/clone_invoker.rs @@ -0,0 +1,256 @@ +use std::{mem, pin::Pin, task::Poll}; + +use dubbo_logger::tracing::debug; +use futures_core::{future::BoxFuture, ready, Future, TryFuture}; +use futures_util::FutureExt; +use pin_project::pin_project; +use thiserror::Error; +use tokio::{ + sync::{ + self, + watch::{Receiver, Sender}, + }, + task::JoinHandle, +}; +use tokio_util::sync::ReusableBoxFuture; +use tower::{buffer::Buffer, ServiceExt}; +use tower_service::Service; + +use crate::StdError; + +use super::clone_body::CloneBody; + +enum Inner { + Invalid, + Ready(S), + Pending(JoinHandle>), +} + +#[derive(Debug, Error)] +#[error("the inner service has not got ready yet!")] +struct InnerServiceNotReadyErr; + +#[pin_project(project = InnerServiceCallingResponseProj)] +enum InnerServiceCallingResponse { + Call(#[pin] Fut), + Fail, +} + +impl Future for InnerServiceCallingResponse +where + Fut: TryFuture, + Fut::Error: Into, +{ + type Output = Result; + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + match self.project() { + InnerServiceCallingResponseProj::Call(call) => call.try_poll(cx).map_err(Into::into), + InnerServiceCallingResponseProj::Fail => { + Poll::Ready(Err(InnerServiceNotReadyErr.into())) + } + } + } +} + +#[derive(Clone)] +enum ObserveState { + Ready, + Pending, +} + +struct ReadyService { + inner: Inner, + tx: Sender, +} + +impl ReadyService { + fn new(inner: S) -> (Self, Receiver) { + let (tx, rx) = 
sync::watch::channel(ObserveState::Ready); + let ready_service = Self { + inner: Inner::Ready(inner), + tx, + }; + (ready_service, rx) + } +} + +impl Service for ReadyService +where + S: Service + Send + 'static, + >::Error: Into, +{ + type Response = S::Response; + + type Error = StdError; + + type Future = InnerServiceCallingResponse; + + fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { + loop { + match mem::replace(&mut self.inner, Inner::Invalid) { + Inner::Ready(mut svc) => { + let poll_ready = svc.poll_ready(cx); + match poll_ready { + Poll::Pending => { + self.inner = Inner::Pending(tokio::spawn(async move { + let poll_ready = svc.ready().await; + match poll_ready { + Ok(_) => Ok(svc), + Err(err) => Err((svc, err.into())), + } + })); + + let _ = self.tx.send(ObserveState::Pending); + continue; + } + Poll::Ready(ret) => { + self.inner = Inner::Ready(svc); + + let _ = self.tx.send(ObserveState::Ready); + return Poll::Ready(ret.map_err(Into::into)); + } + } + } + Inner::Pending(mut join_handle) => { + if let Poll::Ready(res) = join_handle.poll_unpin(cx) { + let (svc, res) = match res { + Err(join_err) => panic!("ReadyService panicked: {join_err}"), + Ok(Err((svc, err))) => (svc, Poll::Ready(Err(err))), + Ok(Ok(svc)) => (svc, Poll::Ready(Ok(()))), + }; + + self.inner = Inner::Ready(svc); + + let _ = self.tx.send(ObserveState::Ready); + return res; + } else { + self.inner = Inner::Pending(join_handle); + + let _ = self.tx.send(ObserveState::Pending); + return Poll::Pending; + } + } + Inner::Invalid => panic!("ReadyService panicked: inner state is invalid"), + } + } + } + + fn call(&mut self, req: Req) -> Self::Future { + match self.inner { + Inner::Ready(ref mut svc) => InnerServiceCallingResponse::Call(svc.call(req)), + _ => InnerServiceCallingResponse::Fail, + } + } +} + +impl Drop for ReadyService { + fn drop(&mut self) { + if let Inner::Pending(ref handler) = self.inner { + handler.abort(); + } + } +} + +pub struct CloneInvoker +where + Inv: Service> + Send + 'static, + Inv::Error: Into + Send + Sync + 'static, + Inv::Future: Send, +{ + inner: Buffer, http::Request>, + rx: Receiver, + poll: ReusableBoxFuture<'static, ObserveState>, + polling: bool, +} + +impl CloneInvoker +where + Inv: Service> + Send + 'static, + Inv::Error: Into + Send + Sync + 'static, + Inv::Future: Send, +{ + const MAX_INVOKER_BUFFER_SIZE: usize = 16; + + pub fn new(invoker: Inv) -> Self { + let (ready_service, rx) = ReadyService::new(invoker); + + let buffer: Buffer, http::Request> = + Buffer::new(ready_service, Self::MAX_INVOKER_BUFFER_SIZE); + + Self { + inner: buffer, + rx, + polling: false, + poll: ReusableBoxFuture::new(futures::future::pending()), + } + } +} + +impl Service> for CloneInvoker +where + Inv: Service> + Send + 'static, + Inv::Error: Into + Send + Sync + 'static, + Inv::Future: Send, +{ + type Response = Inv::Response; + + type Error = StdError; + + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { + loop { + if !self.polling { + match self.rx.borrow().clone() { + ObserveState::Ready => return self.inner.poll_ready(cx), + ObserveState::Pending => { + self.polling = true; + let mut rx = self.rx.clone(); + self.poll.set(async move { + loop { + let current_state = rx.borrow_and_update().clone(); + if matches!(current_state, ObserveState::Ready) { + return current_state; + } + if let Err(_) = rx.changed().await { + debug!("the readyService has already shutdown!"); + futures::future::pending::().await; + } + } + 
}); + } + } + } + + let state = ready!(self.poll.poll_unpin(cx)); + self.polling = false; + + if matches!(state, ObserveState::Pending) { + continue; + } + + return self.inner.poll_ready(cx); + } + } + + fn call(&mut self, req: http::Request) -> Self::Future { + Box::pin(self.inner.call(req)) + } +} + +impl Clone for CloneInvoker +where + Inv: Service> + Send + 'static, + Inv::Error: Into + Send + Sync + 'static, + Inv::Future: Send, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + rx: self.rx.clone(), + polling: false, + poll: ReusableBoxFuture::new(futures::future::pending()), + } + } +} diff --git a/dubbo/src/invoker/mod.rs b/dubbo/src/invoker/mod.rs new file mode 100644 index 00000000..92b8b462 --- /dev/null +++ b/dubbo/src/invoker/mod.rs @@ -0,0 +1,19 @@ +use dubbo_base::Url; + +use crate::{codegen::TripleInvoker, invoker::clone_invoker::CloneInvoker, svc::NewService}; + +pub mod clone_body; +pub mod clone_invoker; + +pub struct NewInvoker; + +impl NewService for NewInvoker { + type Service = CloneInvoker; + + fn new_service(&self, url: String) -> Self::Service { + // todo create another invoker by url protocol + + let url = Url::from_url(&url).unwrap(); + CloneInvoker::new(TripleInvoker::new(url)) + } +} diff --git a/dubbo/src/lib.rs b/dubbo/src/lib.rs index 63c09d3a..d397b42b 100644 --- a/dubbo/src/lib.rs +++ b/dubbo/src/lib.rs @@ -18,12 +18,18 @@ pub mod cluster; pub mod codegen; pub mod context; +pub mod directory; pub mod filter; mod framework; pub mod invocation; +pub mod invoker; +pub mod loadbalancer; +pub mod param; pub mod protocol; pub mod registry; +pub mod route; pub mod status; +pub mod svc; pub mod triple; pub mod utils; diff --git a/dubbo/src/loadbalancer/mod.rs b/dubbo/src/loadbalancer/mod.rs new file mode 100644 index 00000000..4e26781d --- /dev/null +++ b/dubbo/src/loadbalancer/mod.rs @@ -0,0 +1,95 @@ +use futures_core::future::BoxFuture; +use tower::{discover::ServiceList, ServiceExt}; +use tower_service::Service; + +use crate::{ + codegen::RpcInvocation, + invoker::{clone_body::CloneBody, clone_invoker::CloneInvoker}, + param::Param, + svc::NewService, + StdError, +}; + +use crate::protocol::triple::triple_invoker::TripleInvoker; + +pub struct NewLoadBalancer { + inner: N, +} + +#[derive(Clone)] +pub struct LoadBalancer { + inner: S, // Routes service +} + +impl NewLoadBalancer { + pub fn layer() -> impl tower_layer::Layer { + tower_layer::layer_fn(|inner| { + NewLoadBalancer { + inner, // NewRoutes + } + }) + } +} + +impl NewService for NewLoadBalancer +where + T: Param + Clone, + // NewRoutes + N: NewService, +{ + type Service = LoadBalancer; + + fn new_service(&self, target: T) -> Self::Service { + // Routes service + let svc = self.inner.new_service(target); + + LoadBalancer { inner: svc } + } +} + +impl Service> for LoadBalancer +where + // Routes service + N: Service<(), Response = Vec>> + Clone, + N::Error: Into + Send, + N::Future: Send + 'static, +{ + type Response = as Service>>::Response; + + type Error = StdError; + + type Future = BoxFuture<'static, Result>; + + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.inner.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, req: http::Request) -> Self::Future { + let routes = self.inner.call(()); + + let fut = async move { + let routes = routes.await; + + let routes: Vec> = match routes { + Err(e) => return Err(Into::::into(e)), + Ok(routes) => routes, + }; + + let service_list: Vec<_> = routes + .into_iter() + .map(|invoker| 
tower::load::Constant::new(invoker, 1)) + .collect(); + + let service_list = ServiceList::new(service_list); + + let p2c = tower::balance::p2c::Balance::new(service_list); + + p2c.oneshot(req).await + }; + + Box::pin(fut) + } +} diff --git a/dubbo/src/param.rs b/dubbo/src/param.rs new file mode 100644 index 00000000..bef50419 --- /dev/null +++ b/dubbo/src/param.rs @@ -0,0 +1,9 @@ +pub trait Param { + fn param(&self) -> T; +} + +impl Param for T { + fn param(&self) -> T::Owned { + self.to_owned() + } +} diff --git a/dubbo/src/protocol/mod.rs b/dubbo/src/protocol/mod.rs index 145bcc8e..7dbf1f3f 100644 --- a/dubbo/src/protocol/mod.rs +++ b/dubbo/src/protocol/mod.rs @@ -15,15 +15,10 @@ * limitations under the License. */ -use std::{ - fmt::Debug, - future::Future, - task::{Context, Poll}, -}; +use std::task::{Context, Poll}; use async_trait::async_trait; use aws_smithy_http::body::SdkBody; -use dyn_clone::DynClone; use tower_service::Service; use dubbo_base::Url; @@ -44,18 +39,8 @@ pub trait Exporter { fn unexport(&self); } -pub trait Invoker: Debug + DynClone { - type Response; - - type Error; - - type Future: Future>; - +pub trait Invoker: Service { fn get_url(&self) -> Url; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; - - fn call(&mut self, req: ReqBody) -> Self::Future; } pub type BoxExporter = Box; @@ -69,15 +54,6 @@ pub type BoxInvoker = Box< + Sync, >; -dyn_clone::clone_trait_object!( - Invoker< - http::Request, - Response = http::Response, - Error = crate::Error, - Future = crate::BoxFuture, crate::Error>, - > -); - pub struct WrapperInvoker(T); impl Service> for WrapperInvoker diff --git a/dubbo/src/protocol/triple/triple_invoker.rs b/dubbo/src/protocol/triple/triple_invoker.rs index 42dfebe0..c8451e3c 100644 --- a/dubbo/src/protocol/triple/triple_invoker.rs +++ b/dubbo/src/protocol/triple/triple_invoker.rs @@ -15,8 +15,8 @@ * limitations under the License. 
*/ -use aws_smithy_http::body::SdkBody; use dubbo_base::Url; +use http::{HeaderValue, Uri}; use std::{ fmt::{Debug, Formatter}, str::FromStr, @@ -24,27 +24,21 @@ use std::{ use tower_service::Service; use crate::{ - protocol::Invoker, - triple::{client::builder::ClientBoxService, transport::connection::Connection}, - utils::boxed_clone::BoxCloneService, + invoker::clone_body::CloneBody, + triple::transport::{self, connection::Connection}, }; -#[derive(Clone)] pub struct TripleInvoker { url: Url, - conn: ClientBoxService, + conn: Connection, } impl TripleInvoker { pub fn new(url: Url) -> TripleInvoker { let uri = http::Uri::from_str(&url.to_url()).unwrap(); - let mut conn = Connection::new().with_host(uri.clone()); - if let Some(scheme) = uri.scheme_str() { - conn = conn.with_connector(scheme.to_string()); - } Self { url, - conn: BoxCloneService::new(conn), + conn: Connection::new().with_host(uri).build(), } } } @@ -55,25 +49,107 @@ impl Debug for TripleInvoker { } } -impl Invoker> for TripleInvoker { - type Response = http::Response; +impl TripleInvoker { + pub fn map_request(&self, req: http::Request) -> http::Request { + let (parts, body) = req.into_parts(); - type Error = crate::Error; + let path_and_query = parts.headers.get("path").unwrap().to_str().unwrap(); - type Future = crate::BoxFuture; + let authority = self.url.clone().get_ip_port(); - fn get_url(&self) -> Url { - self.url.clone() - } + let uri = Uri::builder() + .scheme("http") + .authority(authority) + .path_and_query(path_and_query) + .build() + .unwrap(); - fn call(&mut self, req: http::Request) -> Self::Future { - self.conn.call(req) + let mut req = hyper::Request::builder() + .version(http::Version::HTTP_2) + .uri(uri.clone()) + .method("POST") + .body(body) + .unwrap(); + + // *req.version_mut() = http::Version::HTTP_2; + req.headers_mut() + .insert("method", HeaderValue::from_static("POST")); + req.headers_mut().insert( + "scheme", + HeaderValue::from_str(uri.scheme_str().unwrap()).unwrap(), + ); + req.headers_mut() + .insert("path", HeaderValue::from_str(uri.path()).unwrap()); + req.headers_mut().insert( + "authority", + HeaderValue::from_str(uri.authority().unwrap().as_str()).unwrap(), + ); + req.headers_mut().insert( + "content-type", + HeaderValue::from_static("application/grpc+proto"), + ); + req.headers_mut() + .insert("user-agent", HeaderValue::from_static("dubbo-rust/0.1.0")); + req.headers_mut() + .insert("te", HeaderValue::from_static("trailers")); + req.headers_mut().insert( + "tri-service-version", + HeaderValue::from_static("dubbo-rust/0.1.0"), + ); + req.headers_mut() + .insert("tri-service-group", HeaderValue::from_static("cluster")); + req.headers_mut().insert( + "tri-unit-info", + HeaderValue::from_static("dubbo-rust/0.1.0"), + ); + // if let Some(_encoding) = self.send_compression_encoding { + + // } + + req.headers_mut() + .insert("grpc-encoding", http::HeaderValue::from_static("gzip")); + + req.headers_mut().insert( + "grpc-accept-encoding", + http::HeaderValue::from_static("gzip"), + ); + + // // const ( + // // TripleContentType = "application/grpc+proto" + // // TripleUserAgent = "grpc-go/1.35.0-dev" + // // TripleServiceVersion = "tri-service-version" + // // TripleAttachement = "tri-attachment" + // // TripleServiceGroup = "tri-service-group" + // // TripleRequestID = "tri-req-id" + // // TripleTraceID = "tri-trace-traceid" + // // TripleTraceRPCID = "tri-trace-rpcid" + // // TripleTraceProtoBin = "tri-trace-proto-bin" + // // TripleUnitInfo = "tri-unit-info" + // // ) + req } +} + +impl 
Service> for TripleInvoker { + type Response = http::Response; + + type Error = crate::Error; + + type Future = crate::BoxFuture; fn poll_ready( &mut self, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { - self.conn.poll_ready(cx) + >>::poll_ready( + &mut self.conn, + cx, + ) + } + + fn call(&mut self, req: http::Request) -> Self::Future { + let req = self.map_request(req); + + self.conn.call(req) } } diff --git a/dubbo/src/registry/mod.rs b/dubbo/src/registry/mod.rs index 2a95452a..b82fda8d 100644 --- a/dubbo/src/registry/mod.rs +++ b/dubbo/src/registry/mod.rs @@ -17,46 +17,46 @@ #![allow(unused_variables, dead_code, missing_docs)] pub mod integration; -pub mod memory_registry; +pub mod n_registry; pub mod protocol; pub mod types; -use std::{ - fmt::{Debug, Formatter}, - sync::Arc, -}; - -use dubbo_base::Url; - -pub type RegistryNotifyListener = Arc; -pub trait Registry { - fn register(&mut self, url: Url) -> Result<(), crate::StdError>; - fn unregister(&mut self, url: Url) -> Result<(), crate::StdError>; - - fn subscribe(&self, url: Url, listener: RegistryNotifyListener) -> Result<(), crate::StdError>; - fn unsubscribe( - &self, - url: Url, - listener: RegistryNotifyListener, - ) -> Result<(), crate::StdError>; -} - -pub trait NotifyListener { - fn notify(&self, event: ServiceEvent); - fn notify_all(&self, event: ServiceEvent); -} - -#[derive(Debug)] -pub struct ServiceEvent { - pub key: String, - pub action: String, - pub service: Vec, -} - -pub type BoxRegistry = Box; - -impl Debug for BoxRegistry { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str("BoxRegistry") - } -} +// use std::{ +// fmt::{Debug, Formatter}, +// sync::Arc, +// }; + +// use dubbo_base::Url; + +// pub type RegistryNotifyListener = Arc; +// pub trait Registry { +// fn register(&mut self, url: Url) -> Result<(), crate::StdError>; +// fn unregister(&mut self, url: Url) -> Result<(), crate::StdError>; + +// fn subscribe(&self, url: Url, listener: RegistryNotifyListener) -> Result<(), crate::StdError>; +// fn unsubscribe( +// &self, +// url: Url, +// listener: RegistryNotifyListener, +// ) -> Result<(), crate::StdError>; +// } + +// pub trait NotifyListener { +// fn notify(&self, event: ServiceEvent); +// fn notify_all(&self, event: ServiceEvent); +// } + +// #[derive(Debug)] +// pub struct ServiceEvent { +// pub key: String, +// pub action: String, +// pub service: Vec, +// } + +// pub type BoxRegistry = Box; + +// impl Debug for BoxRegistry { +// fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { +// f.write_str("BoxRegistry") +// } +// } diff --git a/dubbo/src/registry/n_registry.rs b/dubbo/src/registry/n_registry.rs new file mode 100644 index 00000000..abcd56b8 --- /dev/null +++ b/dubbo/src/registry/n_registry.rs @@ -0,0 +1,203 @@ +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use async_trait::async_trait; +use dubbo_base::Url; +use thiserror::Error; +use tokio::sync::{ + mpsc::{self, Receiver}, + Mutex, +}; +use tower::discover::Change; + +use crate::StdError; + +pub type ServiceChange = Change; +pub type DiscoverStream = Receiver>; +pub type BoxRegistry = Box; + +#[async_trait] +pub trait Registry { + async fn register(&self, url: Url) -> Result<(), StdError>; + + async fn unregister(&self, url: Url) -> Result<(), StdError>; + + async fn subscribe(&self, url: Url) -> Result; + + async fn unsubscribe(&self, url: Url) -> Result<(), StdError>; +} + +#[derive(Clone)] +pub struct ArcRegistry { + inner: Arc, +} + +pub enum RegistryComponent { + 
NacosRegistry(ArcRegistry), + ZookeeperRegistry, + StaticRegistry(StaticRegistry), +} + +pub struct StaticServiceValues { + listeners: Vec>>, + urls: HashSet, +} + +#[derive(Default)] +pub struct StaticRegistry { + urls: Mutex>, +} + +impl ArcRegistry { + pub fn new(registry: impl Registry + Send + Sync + 'static) -> Self { + Self { + inner: Arc::new(registry), + } + } +} + +#[async_trait] +impl Registry for ArcRegistry { + async fn register(&self, url: Url) -> Result<(), StdError> { + self.inner.register(url).await + } + + async fn unregister(&self, url: Url) -> Result<(), StdError> { + self.inner.unregister(url).await + } + + async fn subscribe(&self, url: Url) -> Result { + self.inner.subscribe(url).await + } + + async fn unsubscribe(&self, url: Url) -> Result<(), StdError> { + self.inner.unsubscribe(url).await + } +} + +#[async_trait] +impl Registry for RegistryComponent { + async fn register(&self, url: Url) -> Result<(), StdError> { + todo!() + } + + async fn unregister(&self, url: Url) -> Result<(), StdError> { + todo!() + } + + async fn subscribe(&self, url: Url) -> Result { + match self { + RegistryComponent::NacosRegistry(registry) => registry.subscribe(url).await, + RegistryComponent::ZookeeperRegistry => todo!(), + RegistryComponent::StaticRegistry(registry) => registry.subscribe(url).await, + } + } + + async fn unsubscribe(&self, url: Url) -> Result<(), StdError> { + todo!() + } +} + +impl StaticRegistry { + pub fn new(urls: Vec) -> Self { + let mut map = HashMap::with_capacity(urls.len()); + + for url in urls { + let service_name = url.get_service_name(); + let static_values = map + .entry(service_name) + .or_insert_with(|| StaticServiceValues { + listeners: Vec::new(), + urls: HashSet::new(), + }); + let url = url.to_string(); + static_values.urls.insert(url.clone()); + } + + Self { + urls: Mutex::new(map), + } + } +} + +#[async_trait] +impl Registry for StaticRegistry { + async fn register(&self, url: Url) -> Result<(), StdError> { + let service_name = url.get_service_name(); + let mut lock = self.urls.lock().await; + + let static_values = lock + .entry(service_name) + .or_insert_with(|| StaticServiceValues { + listeners: Vec::new(), + urls: HashSet::new(), + }); + let url = url.to_string(); + static_values.urls.insert(url.clone()); + + static_values.listeners.retain(|listener| { + let ret = listener.try_send(Ok(ServiceChange::Insert(url.clone(), ()))); + ret.is_ok() + }); + + Ok(()) + } + + async fn unregister(&self, url: Url) -> Result<(), StdError> { + let service_name = url.get_service_name(); + let mut lock = self.urls.lock().await; + + match lock.get_mut(&service_name) { + None => Ok(()), + Some(static_values) => { + let url = url.to_string(); + static_values.urls.remove(&url); + static_values.listeners.retain(|listener| { + let ret = listener.try_send(Ok(ServiceChange::Remove(url.clone()))); + ret.is_ok() + }); + if static_values.urls.is_empty() { + lock.remove(&service_name); + } + Ok(()) + } + } + } + + async fn subscribe(&self, url: Url) -> Result { + let service_name = url.get_service_name(); + + let change_rx = { + let mut lock = self.urls.lock().await; + let static_values = lock + .entry(service_name) + .or_insert_with(|| StaticServiceValues { + listeners: Vec::new(), + urls: HashSet::new(), + }); + + let (tx, change_rx) = mpsc::channel(64); + static_values.listeners.push(tx); + + for url in static_values.urls.iter() { + static_values.listeners.retain(|listener| { + let ret = listener.try_send(Ok(ServiceChange::Insert(url.clone(), ()))); + ret.is_ok() + }); + } 
+ change_rx + }; + + Ok(change_rx) + } + + async fn unsubscribe(&self, url: Url) -> Result<(), StdError> { + Ok(()) + } +} + +#[derive(Error, Debug)] +#[error("static registry error: {0}")] +struct StaticRegistryError(String); diff --git a/dubbo/src/registry/protocol.rs b/dubbo/src/registry/protocol.rs index 12509503..b9ba7221 100644 --- a/dubbo/src/registry/protocol.rs +++ b/dubbo/src/registry/protocol.rs @@ -19,11 +19,10 @@ use dubbo_base::Url; use dubbo_logger::tracing; use std::{ collections::HashMap, - fmt::{Debug, Formatter}, - sync::{Arc, Mutex, RwLock}, + sync::{Arc, RwLock}, }; -use super::{memory_registry::MemoryRegistry, BoxRegistry}; +use super::n_registry::{ArcRegistry, Registry, StaticRegistry}; use crate::{ protocol::{ triple::{triple_exporter::TripleExporter, triple_protocol::TripleProtocol}, @@ -42,19 +41,6 @@ pub struct RegistryProtocol { services: HashMap>, } -impl Debug for RegistryProtocol { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str( - format!( - "RegistryProtocol services{:#?},registries,{:#?}", - self.services, - self.registries.clone() - ) - .as_str(), - ) - } -} - impl RegistryProtocol { pub fn new() -> Self { RegistryProtocol { @@ -74,16 +60,17 @@ impl RegistryProtocol { self } - pub fn get_registry(&mut self, url: Url) -> BoxRegistry { - let mem = MemoryRegistry::default(); + pub fn get_registry(&mut self, url: Url) -> ArcRegistry { + let mem = StaticRegistry::default(); + let mem = ArcRegistry::new(mem); self.registries .as_ref() .unwrap() .lock() .unwrap() - .insert(url.location, Arc::new(Mutex::new(Box::new(mem.clone())))); + .insert(url.location, mem.clone()); - Box::new(mem) + mem } } @@ -105,8 +92,8 @@ impl Protocol for RegistryProtocol { if let Some(urls) = registry_url { for url in urls.clone().iter() { if !url.service_key.is_empty() { - let mut reg = self.get_registry(url.clone()); - reg.register(url.clone()).unwrap(); + let reg = self.get_registry(url.clone()); + let _ = reg.register(url.clone()).await; } } } diff --git a/dubbo/src/registry/types.rs b/dubbo/src/registry/types.rs index ae7c7ca0..5c1687da 100644 --- a/dubbo/src/registry/types.rs +++ b/dubbo/src/registry/types.rs @@ -20,30 +20,22 @@ use std::{ sync::{Arc, Mutex}, }; -use dubbo_base::Url; -use dubbo_logger::tracing::info; use itertools::Itertools; -use crate::{ - registry::{BoxRegistry, Registry}, - StdError, -}; - -use super::RegistryNotifyListener; +use super::n_registry::ArcRegistry; -pub type SafeRegistry = Arc>; -pub type Registries = Arc>>; +pub type Registries = Arc>>; pub const DEFAULT_REGISTRY_KEY: &str = "default"; pub trait RegistriesOperation { - fn get(&self, registry_key: &str) -> SafeRegistry; - fn insert(&self, registry_key: String, registry: SafeRegistry); - fn default_registry(&self) -> SafeRegistry; + fn get(&self, registry_key: &str) -> ArcRegistry; + fn insert(&self, registry_key: String, registry: ArcRegistry); + fn default_registry(&self) -> ArcRegistry; } impl RegistriesOperation for Registries { - fn get(&self, registry_key: &str) -> SafeRegistry { + fn get(&self, registry_key: &str) -> ArcRegistry { self.as_ref() .lock() .unwrap() @@ -52,11 +44,11 @@ impl RegistriesOperation for Registries { .clone() } - fn insert(&self, registry_key: String, registry: SafeRegistry) { + fn insert(&self, registry_key: String, registry: ArcRegistry) { self.as_ref().lock().unwrap().insert(registry_key, registry); } - fn default_registry(&self) -> SafeRegistry { + fn default_registry(&self) -> ArcRegistry { let guard = self.as_ref().lock().unwrap(); let 
(_, result) = guard .iter() @@ -66,26 +58,3 @@ impl RegistriesOperation for Registries { result.clone() } } - -impl Registry for SafeRegistry { - fn register(&mut self, url: Url) -> Result<(), StdError> { - info!("register {}.", url); - self.lock().unwrap().register(url).expect("registry err."); - Ok(()) - } - - fn unregister(&mut self, url: Url) -> Result<(), StdError> { - self.lock().unwrap().register(url).expect("registry err."); - Ok(()) - } - - fn subscribe(&self, url: Url, listener: RegistryNotifyListener) -> Result<(), StdError> { - self.lock().unwrap().register(url).expect("registry err."); - Ok(()) - } - - fn unsubscribe(&self, url: Url, listener: RegistryNotifyListener) -> Result<(), StdError> { - self.lock().unwrap().register(url).expect("registry err."); - Ok(()) - } -} diff --git a/dubbo/src/route/mod.rs b/dubbo/src/route/mod.rs new file mode 100644 index 00000000..c2448642 --- /dev/null +++ b/dubbo/src/route/mod.rs @@ -0,0 +1,150 @@ +use std::pin::Pin; + +use dubbo_logger::tracing::debug; +use futures_core::{ready, Future}; +use futures_util::{future::Ready, FutureExt, TryFutureExt}; +use tower::{buffer::Buffer, util::FutureService}; +use tower_service::Service; + +use crate::{ + codegen::{RpcInvocation, TripleInvoker}, + invoker::clone_invoker::CloneInvoker, + param::Param, + svc::NewService, + StdError, +}; + +pub struct NewRoutes { + inner: N, +} + +pub struct NewRoutesFuture { + inner: RoutesFutureInnerState, + target: T, +} + +pub enum RoutesFutureInnerState { + Service(S), + Future( + Pin< + Box< + dyn Future>, StdError>> + + Send + + 'static, + >, + >, + ), + Ready(Vec>), +} + +#[derive(Clone)] +pub struct Routes { + target: T, + invokers: Vec>, +} + +impl NewRoutes { + pub fn new(inner: N) -> Self { + Self { inner } + } +} + +impl NewRoutes { + const MAX_ROUTE_BUFFER_SIZE: usize = 16; + + pub fn layer() -> impl tower_layer::Layer { + tower_layer::layer_fn(|inner: N| NewRoutes::new(inner)) + } +} + +impl NewService for NewRoutes +where + T: Param + Clone + Send + Unpin + 'static, + // NewDirectory + N: NewService, + // Directory + N::Service: Service<(), Response = Vec>> + Unpin + Send + 'static, + >::Error: Into, + >::Future: Send + 'static, +{ + type Service = + Buffer>::Service, T>, Routes>, ()>; + + fn new_service(&self, target: T) -> Self::Service { + let inner = self.inner.new_service(target.clone()); + + Buffer::new( + FutureService::new(NewRoutesFuture { + inner: RoutesFutureInnerState::Service(inner), + target, + }), + Self::MAX_ROUTE_BUFFER_SIZE, + ) + } +} + +impl Future for NewRoutesFuture +where + T: Param + Clone + Unpin, + // Directory + N: Service<(), Response = Vec>> + Unpin, + N::Error: Into, + N::Future: Send + 'static, +{ + type Output = Result, StdError>; + + fn poll( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let this = self.get_mut(); + + loop { + match this.inner { + RoutesFutureInnerState::Service(ref mut service) => { + debug!("RoutesFutureInnerState::Service"); + let _ = ready!(service.poll_ready(cx)).map_err(Into::into)?; + let fut = service.call(()).map_err(|e| e.into()).boxed(); + this.inner = RoutesFutureInnerState::Future(fut); + } + RoutesFutureInnerState::Future(ref mut futures) => { + debug!("RoutesFutureInnerState::Future"); + let invokers = ready!(futures.as_mut().poll(cx))?; + this.inner = RoutesFutureInnerState::Ready(invokers); + } + RoutesFutureInnerState::Ready(ref invokers) => { + debug!("RoutesFutureInnerState::Ready"); + let target = this.target.clone(); + return 
std::task::Poll::Ready(Ok(Routes { + invokers: invokers.clone(), + target, + })); + } + } + } + } +} + +impl Service<()> for Routes +where + T: Param + Clone, +{ + type Response = Vec>; + + type Error = StdError; + + type Future = Ready>; + + fn poll_ready( + &mut self, + _: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + std::task::Poll::Ready(Ok(())) + } + + fn call(&mut self, _: ()) -> Self::Future { + // some router operator + // if new_invokers changed, send new invokers to routes_rx after router operator + futures_util::future::ok(self.invokers.clone()) + } +} diff --git a/dubbo/src/status.rs b/dubbo/src/status.rs index 7258b481..2a89fafd 100644 --- a/dubbo/src/status.rs +++ b/dubbo/src/status.rs @@ -281,6 +281,10 @@ impl Status { self.code } + pub fn message(&self) -> String { + self.message.clone() + } + pub fn to_http(&self) -> http::Response { let (mut parts, _) = http::Response::new(()).into_parts(); diff --git a/dubbo/src/svc.rs b/dubbo/src/svc.rs new file mode 100644 index 00000000..db59b92d --- /dev/null +++ b/dubbo/src/svc.rs @@ -0,0 +1,76 @@ +use std::{marker::PhantomData, sync::Arc}; + +pub trait NewService { + type Service; + + fn new_service(&self, target: T) -> Self::Service; +} + +pub struct ArcNewService { + inner: Arc + Send + Sync>, +} + +impl ArcNewService { + pub fn layer() -> impl tower_layer::Layer + Clone + Copy + where + N: NewService + Send + Sync + 'static, + S: Send + 'static, + { + tower_layer::layer_fn(Self::new) + } + + pub fn new(inner: N) -> Self + where + N: NewService + Send + Sync + 'static, + S: Send + 'static, + { + Self { + inner: Arc::new(inner), + } + } +} + +impl Clone for ArcNewService { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl NewService for ArcNewService { + type Service = S; + + fn new_service(&self, t: T) -> S { + self.inner.new_service(t) + } +} + +// inner: Box> + Send>>> + Send>, +pub struct BoxedService { + inner: N, + _mark: PhantomData, +} + +impl BoxedService { + pub fn layer() -> impl tower_layer::Layer { + tower_layer::layer_fn(|inner: N| Self { + inner, + _mark: PhantomData, + }) + } +} + +// impl NewService for BoxedService +// where +// N: NewService, +// N::Service: Service + Send, +// >::Future: Send + 'static, +// { +// type Service = Box>::Response, Error = >::Error, Future = Pin>::Response, >::Error>> + Send>>> + Send>; + +// fn new_service(&self, target: T) -> Self::Service { +// let service = self.inner.new_service(target); +// Box::new(service.map_future(|f|Box::pin(f) as _)) +// } +// } diff --git a/dubbo/src/triple/client/builder.rs b/dubbo/src/triple/client/builder.rs index 9dae0f93..0dadbcc1 100644 --- a/dubbo/src/triple/client/builder.rs +++ b/dubbo/src/triple/client/builder.rs @@ -18,26 +18,28 @@ use std::sync::Arc; use crate::{ - cluster::{directory::StaticDirectory, Cluster, Directory, MockCluster, MockDirectory}, - codegen::{RegistryDirectory, RpcInvocation, TripleInvoker}, - protocol::BoxInvoker, - triple::compression::CompressionEncoding, + cluster::NewCluster, + directory::NewCachedDirectory, + loadbalancer::NewLoadBalancer, + registry::n_registry::{ArcRegistry, RegistryComponent, StaticRegistry}, + route::NewRoutes, utils::boxed_clone::BoxCloneService, }; use aws_smithy_http::body::SdkBody; use dubbo_base::Url; - -use super::TripleClient; +use tower::ServiceBuilder; pub type ClientBoxService = BoxCloneService, http::Response, crate::Error>; -#[derive(Clone, Debug, Default)] +pub type ServiceMK = Arc>>>>; + +#[derive(Default)] pub struct ClientBuilder { 
pub timeout: Option, pub connector: &'static str, - directory: Option>, + registry: Option, pub direct: bool, host: String, } @@ -47,7 +49,7 @@ impl ClientBuilder { ClientBuilder { timeout: None, connector: "", - directory: None, + registry: None, direct: false, host: "".to_string(), } @@ -57,7 +59,10 @@ impl ClientBuilder { Self { timeout: None, connector: "", - directory: Some(Box::new(StaticDirectory::new(&host))), + registry: Some(ArcRegistry::new(StaticRegistry::new(vec![Url::from_url( + host, + ) + .unwrap()]))), direct: true, host: host.to_string(), } @@ -70,64 +75,41 @@ impl ClientBuilder { } } - /// host: http://0.0.0.0:8888 - pub fn with_directory(self, directory: Box) -> Self { - Self { - directory: Some(directory), - ..self - } - } - - pub fn with_registry_directory(self, registry: RegistryDirectory) -> Self { + pub fn with_registry(self, registry: ArcRegistry) -> Self { Self { - directory: Some(Box::new(registry)), + registry: Some(registry), ..self } } pub fn with_host(self, host: &'static str) -> Self { Self { - directory: Some(Box::new(StaticDirectory::new(&host))), + registry: Some(ArcRegistry::new(StaticRegistry::new(vec![Url::from_url( + host, + ) + .unwrap()]))), ..self } } pub fn with_connector(self, connector: &'static str) -> Self { - Self { - connector: connector, - ..self - } + Self { connector, ..self } } pub fn with_direct(self, direct: bool) -> Self { Self { direct, ..self } } - pub(crate) fn direct_build(self) -> TripleClient { - let mut cli = TripleClient { - send_compression_encoding: Some(CompressionEncoding::Gzip), - builder: Some(self.clone()), - invoker: None, - }; - cli.invoker = Some(Box::new(TripleInvoker::new( - Url::from_url(&self.host).unwrap(), - ))); - return cli; - } - - pub fn build(self, invocation: Arc) -> Option { - if self.direct { - return Some(Box::new(TripleInvoker::new( - Url::from_url(&self.host).unwrap(), - ))); - } - let invokers = match self.directory { - Some(v) => v.list(invocation), - None => panic!("use direct connection"), - }; + pub fn build(mut self) -> ServiceMK { + let registry = self.registry.take().expect("registry must not be empty"); - let cluster = MockCluster::default().join(Box::new(MockDirectory::new(invokers))); + let mk_service = ServiceBuilder::new() + .layer(NewCluster::layer()) + .layer(NewLoadBalancer::layer()) + .layer(NewRoutes::layer()) + .layer(NewCachedDirectory::layer()) + .service(registry); - return Some(cluster); + Arc::new(mk_service) } } diff --git a/dubbo/src/triple/client/triple.rs b/dubbo/src/triple/client/triple.rs index d4bc220b..377dc435 100644 --- a/dubbo/src/triple/client/triple.rs +++ b/dubbo/src/triple/client/triple.rs @@ -15,49 +15,42 @@ * limitations under the License. 
*/ -use std::str::FromStr; - use futures_util::{future, stream, StreamExt, TryStreamExt}; use aws_smithy_http::body::SdkBody; use http::HeaderValue; -use prost::Message; -use serde::{Deserialize, Serialize}; +use tower_service::Service; -use super::builder::ClientBuilder; -use crate::codegen::{ProstCodec, RpcInvocation, SerdeCodec}; +use super::builder::{ClientBuilder, ServiceMK}; +use crate::codegen::RpcInvocation; use crate::{ invocation::{IntoStreamingRequest, Metadata, Request, Response}, - protocol::BoxInvoker, - status::Status, - triple::{ - codec::{Codec, Decoder, Encoder}, - compression::CompressionEncoding, - decode::Decoding, - encode::encode, - }, + svc::NewService, + triple::{codec::Codec, compression::CompressionEncoding, decode::Decoding, encode::encode}, }; -#[derive(Debug, Clone, Default)] +#[derive(Clone)] pub struct TripleClient { pub(crate) send_compression_encoding: Option, - pub(crate) builder: Option, - pub invoker: Option, + pub(crate) mk: ServiceMK, } impl TripleClient { pub fn connect(host: String) -> Self { let builder = ClientBuilder::from_static(&host).with_direct(true); + let mk = builder.build(); - builder.direct_build() + TripleClient { + send_compression_encoding: Some(CompressionEncoding::Gzip), + mk, + } } pub fn new(builder: ClientBuilder) -> Self { TripleClient { send_compression_encoding: Some(CompressionEncoding::Gzip), - builder: Some(builder), - invoker: None, + mk: builder.build(), } } @@ -112,11 +105,12 @@ impl TripleClient { if let Some(_encoding) = self.send_compression_encoding { req.headers_mut() .insert("grpc-encoding", http::HeaderValue::from_static("gzip")); - req.headers_mut().insert( - "grpc-accept-encoding", - http::HeaderValue::from_static("gzip"), - ); } + req.headers_mut().insert( + "grpc-accept-encoding", + http::HeaderValue::from_static("gzip"), + ); + // const ( // TripleContentType = "application/grpc+proto" // TripleUserAgent = "grpc-go/1.35.0-dev" @@ -132,54 +126,44 @@ impl TripleClient { req } - pub async fn unary( + pub async fn unary( &mut self, req: Request, + mut codec: C, path: http::uri::PathAndQuery, invocation: RpcInvocation, ) -> Result, crate::status::Status> where - M1: Message + Send + Sync + 'static + Serialize, - M2: Message + Send + Sync + 'static + for<'a> Deserialize<'a> + Default, + C: Codec, + M1: Send + Sync + 'static, + M2: Send + Sync + 'static, { - let (decoder, encoder): ( - Box + Send + 'static>, - Box + Send + 'static>, - ) = get_codec("application/grpc+proto"); let req = req.map(|m| stream::once(future::ready(m))); let body_stream = encode( - encoder, + codec.encoder(), req.into_inner().map(Ok), self.send_compression_encoding, - true, ) .into_stream(); let body = hyper::Body::wrap_stream(body_stream); - let bytes = hyper::body::to_bytes(body).await.unwrap(); - let sdk_body = SdkBody::from(bytes); - - let mut conn = match self.invoker.clone() { - Some(v) => v, - None => self - .builder - .clone() - .unwrap() - .build(invocation.into()) - .unwrap(), - }; - - let http_uri = http::Uri::from_str(&conn.get_url().to_url()).unwrap(); - let req = self.map_request(http_uri.clone(), path, sdk_body); - - let response = conn - .call(req) + + let mut invoker = self.mk.new_service(invocation); + + let request = http::Request::builder() + .header("path", path.to_string()) + .body(body) + .unwrap(); + + let response = invoker + .call(request) .await .map_err(|err| crate::status::Status::from_error(err.into())); match response { Ok(v) => { - let resp = v - .map(|body| Decoding::new(body, decoder, 
self.send_compression_encoding, true)); + let resp = v.map(|body| { + Decoding::new(body, codec.decoder(), self.send_compression_encoding) + }); let (mut parts, body) = Response::from_http(resp).into_parts(); futures_util::pin_mut!(body); @@ -203,53 +187,44 @@ impl TripleClient { } } - pub async fn bidi_streaming( + pub async fn bidi_streaming( &mut self, req: impl IntoStreamingRequest, + mut codec: C, path: http::uri::PathAndQuery, invocation: RpcInvocation, ) -> Result>, crate::status::Status> where - M1: Message + Send + Sync + 'static + Serialize, - M2: Message + Send + Sync + 'static + for<'a> Deserialize<'a> + Default, + C: Codec, + M1: Send + Sync + 'static, + M2: Send + Sync + 'static, { - let (decoder, encoder): ( - Box + Send + 'static>, - Box + Send + 'static>, - ) = get_codec("application/grpc+proto"); let req = req.into_streaming_request(); let en = encode( - encoder, + codec.encoder(), req.into_inner().map(Ok), self.send_compression_encoding, - true, ) .into_stream(); let body = hyper::Body::wrap_stream(en); - let sdk_body = SdkBody::from(body); - - let mut conn = match self.invoker.clone() { - Some(v) => v, - None => self - .builder - .clone() - .unwrap() - .build(invocation.into()) - .unwrap(), - }; - - let http_uri = http::Uri::from_str(&conn.get_url().to_url()).unwrap(); - let req = self.map_request(http_uri.clone(), path, sdk_body); - - let response = conn - .call(req) + + let mut invoker = self.mk.new_service(invocation); + + let request = http::Request::builder() + .header("path", path.to_string()) + .body(body) + .unwrap(); + + let response = invoker + .call(request) .await .map_err(|err| crate::status::Status::from_error(err.into())); match response { Ok(v) => { - let resp = v - .map(|body| Decoding::new(body, decoder, self.send_compression_encoding, true)); + let resp = v.map(|body| { + Decoding::new(body, codec.decoder(), self.send_compression_encoding) + }); Ok(Response::from_http(resp)) } @@ -257,54 +232,44 @@ impl TripleClient { } } - pub async fn client_streaming( + pub async fn client_streaming( &mut self, req: impl IntoStreamingRequest, + mut codec: C, path: http::uri::PathAndQuery, invocation: RpcInvocation, ) -> Result, crate::status::Status> where - M1: Message + Send + Sync + 'static + Serialize, - M2: Message + Send + Sync + 'static + for<'a> Deserialize<'a> + Default, + C: Codec, + M1: Send + Sync + 'static, + M2: Send + Sync + 'static, { - let (decoder, encoder): ( - Box + Send + 'static>, - Box + Send + 'static>, - ) = get_codec("application/grpc+proto"); let req = req.into_streaming_request(); let en = encode( - encoder, + codec.encoder(), req.into_inner().map(Ok), self.send_compression_encoding, - true, ) .into_stream(); let body = hyper::Body::wrap_stream(en); - let sdk_body = SdkBody::from(body); + let mut invoker = self.mk.new_service(invocation); - let mut conn = match self.invoker.clone() { - Some(v) => v, - None => self - .builder - .clone() - .unwrap() - .build(invocation.into()) - .unwrap(), - }; - - let http_uri = http::Uri::from_str(&conn.get_url().to_url()).unwrap(); - let req = self.map_request(http_uri.clone(), path, sdk_body); + let request = http::Request::builder() + .header("path", path.to_string()) + .body(body) + .unwrap(); // let mut conn = Connection::new().with_host(http_uri); - let response = conn - .call(req) + let response = invoker + .call(request) .await .map_err(|err| crate::status::Status::from_error(err.into())); match response { Ok(v) => { - let resp = v - .map(|body| Decoding::new(body, decoder, 
self.send_compression_encoding, true)); + let resp = v.map(|body| { + Decoding::new(body, codec.decoder(), self.send_compression_encoding) + }); let (mut parts, body) = Response::from_http(resp).into_parts(); futures_util::pin_mut!(body); @@ -328,52 +293,43 @@ impl TripleClient { } } - pub async fn server_streaming( + pub async fn server_streaming( &mut self, req: Request, + mut codec: C, path: http::uri::PathAndQuery, invocation: RpcInvocation, ) -> Result>, crate::status::Status> where - M1: Message + Send + Sync + 'static + Serialize, - M2: Message + Send + Sync + 'static + for<'a> Deserialize<'a> + Default, + C: Codec, + M1: Send + Sync + 'static, + M2: Send + Sync + 'static, { - let (decoder, encoder): ( - Box + Send + 'static>, - Box + Send + 'static>, - ) = get_codec("application/grpc+proto"); let req = req.map(|m| stream::once(future::ready(m))); let en = encode( - encoder, + codec.encoder(), req.into_inner().map(Ok), self.send_compression_encoding, - true, ) .into_stream(); let body = hyper::Body::wrap_stream(en); - let sdk_body = SdkBody::from(body); - - let mut conn = match self.invoker.clone() { - Some(v) => v, - None => self - .builder - .clone() - .unwrap() - .build(invocation.into()) - .unwrap(), - }; - let http_uri = http::Uri::from_str(&conn.get_url().to_url()).unwrap(); - let req = self.map_request(http_uri.clone(), path, sdk_body); - - let response = conn - .call(req) + let mut invoker = self.mk.new_service(invocation); + + let request = http::Request::builder() + .header("path", path.to_string()) + .body(body) + .unwrap(); + + let response = invoker + .call(request) .await .map_err(|err| crate::status::Status::from_error(err.into())); match response { Ok(v) => { - let resp = v - .map(|body| Decoding::new(body, decoder, self.send_compression_encoding, true)); + let resp = v.map(|body| { + Decoding::new(body, codec.decoder(), self.send_compression_encoding) + }); Ok(Response::from_http(resp)) } @@ -381,26 +337,3 @@ impl TripleClient { } } } - -pub fn get_codec( - content_type: &str, -) -> ( - Box + Send + 'static>, - Box + Send + 'static>, -) -where - M1: Message + Send + Sync + 'static + Serialize, - M2: Message + Send + Sync + 'static + for<'a> Deserialize<'a> + Default, -{ - //Determine whether to use JSON as the serialization method. 
- match content_type.ends_with("json") { - true => { - let mut codec = SerdeCodec::::default(); - (Box::new(codec.decoder()), Box::new(codec.encoder())) - } - false => { - let mut codec = ProstCodec::::default(); - (Box::new(codec.decoder()), Box::new(codec.encoder())) - } - } -} diff --git a/dubbo/src/triple/decode.rs b/dubbo/src/triple/decode.rs index 5b5df847..07c1160b 100644 --- a/dubbo/src/triple/decode.rs +++ b/dubbo/src/triple/decode.rs @@ -38,29 +38,22 @@ pub struct Decoding { trailers: Option, compress: Option, decompress_buf: BytesMut, - decode_as_grpc: bool, } #[derive(PartialEq)] enum State { ReadHeader, - ReadHttpBody, ReadBody { len: usize, is_compressed: bool }, Error, } impl Decoding { - pub fn new( - body: B, - decoder: Box + Send + 'static>, - compress: Option, - decode_as_grpc: bool, - ) -> Self + pub fn new(body: B, decoder: D, compress: Option) -> Self where B: Body + Send + 'static, B::Error: Into, + D: Decoder + Send + 'static, { - //Determine whether to use the gRPC mode to handle request data Self { state: State::ReadHeader, body: body @@ -72,12 +65,11 @@ impl Decoding { ) }) .boxed_unsync(), - decoder, + decoder: Box::new(decoder), buf: BytesMut::with_capacity(super::consts::BUFFER_SIZE), trailers: None, compress, decompress_buf: BytesMut::new(), - decode_as_grpc, } } @@ -99,47 +91,7 @@ impl Decoding { trailer.map(|data| data.map(Metadata::from_headers)) } - pub fn decode_http(&mut self) -> Result, crate::status::Status> { - if self.state == State::ReadHeader { - self.state = State::ReadHttpBody; - return Ok(None); - } - if let State::ReadHttpBody = self.state { - if self.buf.is_empty() { - return Ok(None); - } - match self.compress { - None => self.decompress_buf = self.buf.clone(), - Some(compress) => { - let len = self.buf.len(); - if let Err(err) = - decompress(compress, &mut self.buf, &mut self.decompress_buf, len) - { - return Err(crate::status::Status::new( - crate::status::Code::Internal, - err.to_string(), - )); - } - } - } - let len = self.decompress_buf.len(); - let decoding_result = self - .decoder - .decode(&mut DecodeBuf::new(&mut self.decompress_buf, len)); - - return match decoding_result { - Ok(Some(r)) => { - self.state = State::ReadHeader; - Ok(Some(r)) - } - Ok(None) => Ok(None), - Err(err) => Err(err), - }; - } - Ok(None) - } - - pub fn decode_grpc(&mut self) -> Result, crate::status::Status> { + pub fn decode_chunk(&mut self) -> Result, crate::status::Status> { if self.state == State::ReadHeader { // buffer is full if self.buf.remaining() < super::consts::HEADER_SIZE { @@ -214,14 +166,6 @@ impl Decoding { Ok(None) } - - pub fn decode_chunk(&mut self) -> Result, crate::status::Status> { - if self.decode_as_grpc { - self.decode_grpc() - } else { - self.decode_http() - } - } } impl Stream for Decoding { diff --git a/dubbo/src/triple/encode.rs b/dubbo/src/triple/encode.rs index baff6be3..b837463b 100644 --- a/dubbo/src/triple/encode.rs +++ b/dubbo/src/triple/encode.rs @@ -28,13 +28,13 @@ use crate::triple::codec::{EncodeBuf, Encoder}; #[allow(unused_must_use)] pub fn encode( - mut encoder: Box + Send + 'static>, + mut encoder: E, resp_body: B, compression_encoding: Option, - encode_as_grpc: bool, ) -> impl TryStream where - B: Stream>, + E: Encoder, + B: Stream>, { async_stream::stream! 
{ let mut buf = BytesMut::with_capacity(super::consts::BUFFER_SIZE); @@ -48,13 +48,12 @@ where loop { match resp_body.next().await { Some(Ok(item)) => { - if encode_as_grpc { - buf.reserve(super::consts::HEADER_SIZE); - unsafe { - buf.advance_mut(super::consts::HEADER_SIZE); - } - } // encode the data into the buffer + buf.reserve(super::consts::HEADER_SIZE); + unsafe { + buf.advance_mut(super::consts::HEADER_SIZE); + } + if enable_compress { uncompression_buf.clear(); @@ -67,21 +66,16 @@ where } else { encoder.encode(item, &mut EncodeBuf::new(&mut buf)).map_err(|_e| crate::status::Status::new(crate::status::Code::Internal, "encode error".to_string())); } - let result=match encode_as_grpc{ - true=>{ - let len = buf.len() - super::consts::HEADER_SIZE; - { - let mut buf = &mut buf[..super::consts::HEADER_SIZE]; - buf.put_u8(enable_compress as u8); - buf.put_u32(len as u32); - } - buf.split_to(len + super::consts::HEADER_SIZE) - } - false=>{ - buf.clone() - } - }; - yield Ok(result.freeze()); + + + let len = buf.len() - super::consts::HEADER_SIZE; + { + let mut buf = &mut buf[..super::consts::HEADER_SIZE]; + buf.put_u8(enable_compress as u8); + buf.put_u32(len as u32); + } + + yield Ok(buf.split_to(len + super::consts::HEADER_SIZE).freeze()); }, Some(Err(err)) => yield Err(err.into()), None => break, @@ -91,28 +85,28 @@ where } pub fn encode_server( - encoder: Box + Send + 'static>, + encoder: E, body: B, compression_encoding: Option, - encode_as_grpc: bool, ) -> EncodeBody>> where - B: Stream>, + E: Encoder, + B: Stream>, { - let s = encode(encoder, body, compression_encoding, encode_as_grpc).into_stream(); + let s = encode(encoder, body, compression_encoding).into_stream(); EncodeBody::new_server(s) } pub fn encode_client( - encoder: Box + Send + 'static>, + encoder: E, body: B, compression_encoding: Option, - is_grpc: bool, ) -> EncodeBody>> where - B: Stream, + E: Encoder, + B: Stream, { - let s = encode(encoder, body.map(Ok), compression_encoding, is_grpc).into_stream(); + let s = encode(encoder, body.map(Ok), compression_encoding).into_stream(); EncodeBody::new_client(s) } @@ -121,7 +115,6 @@ enum Role { Server, Client, } - #[pin_project] pub struct EncodeBody { #[pin] diff --git a/dubbo/src/triple/mod.rs b/dubbo/src/triple/mod.rs index 799ac099..07aa906a 100644 --- a/dubbo/src/triple/mod.rs +++ b/dubbo/src/triple/mod.rs @@ -23,3 +23,4 @@ pub mod decode; pub mod encode; pub mod server; pub mod transport; +pub mod triple_wrapper; diff --git a/dubbo/src/triple/server/mod.rs b/dubbo/src/triple/server/mod.rs index b36f7693..a5cf0cf0 100644 --- a/dubbo/src/triple/server/mod.rs +++ b/dubbo/src/triple/server/mod.rs @@ -17,6 +17,7 @@ pub mod builder; pub mod service; +pub mod support; pub mod triple; pub use triple::TripleServer; diff --git a/dubbo/src/triple/server/support.rs b/dubbo/src/triple/server/support.rs new file mode 100644 index 00000000..05f1eb24 --- /dev/null +++ b/dubbo/src/triple/server/support.rs @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use bytes::Buf; +use std::{ + sync::Arc, + task::{Context, Poll}, +}; + +use crate::codegen::ProstCodec; +use crate::param::Param; +use crate::status::Code; +use crate::triple::triple_wrapper::{TripleRequestWrapper, TripleResponseWrapper}; +use crate::{ + codegen::{Request, Response, UnarySvc}, + status::Status, + BoxBody, BoxFuture, StdError, +}; +use http_body::Body; +use tower::Service; + +use super::TripleServer; + +pub type RpcFuture = std::pin::Pin + Send>>; + +pub struct RpcMsg { + pub version: Option, + pub class_name: String, + pub method_name: String, + pub req: Vec, + pub res: Result, +} + +impl RpcMsg { + pub fn new(path: String, version: Option) -> Self { + let attr: Vec<&str> = path.split("/").collect(); + RpcMsg { + version, + class_name: attr[1].to_string(), + method_name: attr[2].to_string(), + req: vec![], + res: Err(Status::new(Code::Ok, "success".to_string())), + } + } +} + +pub trait RpcServer: Send + Sync + 'static { + fn invoke(&self, msg: RpcMsg) -> RpcFuture; + fn get_info(&self) -> (&str, &str, Option<&str>, Vec); +} + +struct _Inner(Arc); + +#[derive(Debug)] +pub struct RpcHttp2Server { + inner: _Inner, +} + +impl RpcHttp2Server { + pub fn new(inner: T) -> Self { + Self { + inner: _Inner(Arc::new(inner)), + } + } +} + +impl Service> for RpcHttp2Server +where + T: RpcServer + 'static, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, +{ + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let path = req.uri().path().to_string(); + let version = req + .headers() + .get("tri-service-version") + .map(|e| String::from_utf8_lossy(e.as_bytes()).to_string()); + let rpc_msg = RpcMsg::new(path, version); + let rpc_unary_server = RpcUnaryServer { + inner: self.inner.clone(), + msg: Some(rpc_msg), + }; + let mut server = + TripleServer::new(ProstCodec::::default()); + let fut = async move { + let res = server.unary(rpc_unary_server, req).await; + Ok(res) + }; + Box::pin(fut) + } +} + +#[allow(non_camel_case_types)] +struct RpcUnaryServer { + inner: _Inner, + msg: Option, +} + +impl UnarySvc for RpcUnaryServer { + type Response = TripleResponseWrapper; + type Future = BoxFuture, crate::status::Status>; + fn call(&mut self, request: Request) -> Self::Future { + let inner = self.inner.0.clone(); + let mut msg = self.msg.take().unwrap(); + msg.req = request.message.get_req(); + let fut = async move { + let res = inner.invoke(msg).await.res; + match res { + Ok(res) => Ok(Response::new(TripleResponseWrapper::new(res))), + Err(err) => Err(err), + } + }; + Box::pin(fut) + } +} + +impl Clone for RpcHttp2Server { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { inner } + } +} + +impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } +} diff --git 
a/dubbo/src/triple/server/triple.rs b/dubbo/src/triple/server/triple.rs index 2567b490..2c0626ce 100644 --- a/dubbo/src/triple/server/triple.rs +++ b/dubbo/src/triple/server/triple.rs @@ -16,18 +16,12 @@ */ use futures_util::{future, stream, StreamExt, TryStreamExt}; -use http::HeaderValue; use http_body::Body; -use prost::Message; -use serde::{Deserialize, Serialize}; -use std::marker::PhantomData; use crate::{ invocation::Request, - status::Status, triple::{ - client::triple::get_codec, - codec::{Decoder, Encoder}, + codec::Codec, compression::{CompressionEncoding, COMPRESSIONS}, decode::Decoding, encode::encode_server, @@ -40,24 +34,23 @@ use dubbo_config::BusinessConfig; pub const GRPC_ACCEPT_ENCODING: &str = "grpc-accept-encoding"; pub const GRPC_ENCODING: &str = "grpc-encoding"; -pub struct TripleServer { - _pd: PhantomData<(M1, M2)>, +pub struct TripleServer { + codec: T, compression: Option, } -impl TripleServer { - pub fn new() -> Self { +impl TripleServer { + pub fn new(codec: T) -> Self { Self { - _pd: PhantomData, - compression: Some(CompressionEncoding::Gzip), + codec, + compression: None, } } } -impl TripleServer +impl TripleServer where - M1: Message + for<'a> Deserialize<'a> + Default + 'static, - M2: Message + Serialize + Default + 'static, + T: Codec, { pub async fn client_streaming( &mut self, @@ -65,20 +58,10 @@ where req: http::Request, ) -> http::Response where - S: ClientStreamingSvc, + S: ClientStreamingSvc, B: Body + Send + 'static, B::Error: Into + Send, { - let content_type = req - .headers() - .get("content-type") - .cloned() - .unwrap_or(HeaderValue::from_str("application/grpc+proto").unwrap()); - let content_type_str = content_type.to_str().unwrap(); - let (decoder, encoder): ( - Box + Send + 'static>, - Box + Send + 'static>, - ) = get_codec(content_type_str); let mut accept_encoding = CompressionEncoding::from_accept_encoding(req.headers()); if self.compression.is_none() || accept_encoding.is_none() { accept_encoding = None; @@ -90,7 +73,7 @@ where Err(status) => return status.to_http(), }; - let req_stream = req.map(|body| Decoding::new(body, decoder, compression, true)); + let req_stream = req.map(|body| Decoding::new(body, self.codec.decoder(), compression)); let resp = service.call(Request::from_http(req_stream)).await; @@ -100,15 +83,15 @@ where }; let resp_body = encode_server( - encoder, + self.codec.encoder(), stream::once(future::ready(resp_body)).map(Ok).into_stream(), accept_encoding, - true, ); - parts - .headers - .insert(http::header::CONTENT_TYPE, content_type); + parts.headers.insert( + http::header::CONTENT_TYPE, + http::HeaderValue::from_static("application/grpc"), + ); if let Some(encoding) = accept_encoding { parts .headers @@ -124,21 +107,11 @@ where req: http::Request, ) -> http::Response where - S: StreamingSvc, + S: StreamingSvc, S::ResponseStream: Send + 'static, B: Body + Send + 'static, B::Error: Into + Send, { - let content_type = req - .headers() - .get("content-type") - .cloned() - .unwrap_or(HeaderValue::from_str("application/grpc+proto").unwrap()); - let content_type_str = content_type.to_str().unwrap(); - let (decoder, encoder): ( - Box + Send + 'static>, - Box + Send + 'static>, - ) = get_codec(content_type_str); // Firstly, get grpc_accept_encoding from http_header, get compression // Secondly, if server enable compression and compression is valid, this method should compress response let mut accept_encoding = CompressionEncoding::from_accept_encoding(req.headers()); @@ -152,7 +125,7 @@ where Err(status) => return 
status.to_http(), }; - let req_stream = req.map(|body| Decoding::new(body, decoder, compression, true)); + let req_stream = req.map(|body| Decoding::new(body, self.codec.decoder(), compression)); let resp = service.call(Request::from_http(req_stream)).await; @@ -160,11 +133,12 @@ where Ok(v) => v.into_http().into_parts(), Err(err) => return err.to_http(), }; - let resp_body = encode_server(encoder, resp_body, compression, true); + let resp_body = encode_server(self.codec.encoder(), resp_body, compression); - parts - .headers - .insert(http::header::CONTENT_TYPE, content_type); + parts.headers.insert( + http::header::CONTENT_TYPE, + http::HeaderValue::from_static("application/grpc"), + ); if let Some(encoding) = accept_encoding { parts .headers @@ -180,21 +154,11 @@ where req: http::Request, ) -> http::Response where - S: ServerStreamingSvc, + S: ServerStreamingSvc, S::ResponseStream: Send + 'static, B: Body + Send + 'static, B::Error: Into + Send, { - let content_type = req - .headers() - .get("content-type") - .cloned() - .unwrap_or(HeaderValue::from_str("application/grpc+proto").unwrap()); - let content_type_str = content_type.to_str().unwrap(); - let (decoder, encoder): ( - Box + Send + 'static>, - Box + Send + 'static>, - ) = get_codec(content_type_str); // Firstly, get grpc_accept_encoding from http_header, get compression // Secondly, if server enable compression and compression is valid, this method should compress response let mut accept_encoding = CompressionEncoding::from_accept_encoding(req.headers()); @@ -207,7 +171,8 @@ where Ok(val) => val, Err(status) => return status.to_http(), }; - let req_stream = req.map(|body| Decoding::new(body, decoder, compression, true)); + + let req_stream = req.map(|body| Decoding::new(body, self.codec.decoder(), compression)); let (parts, mut body) = Request::from_http(req_stream).into_parts(); let msg = body.try_next().await.unwrap().ok_or_else(|| { crate::status::Status::new(crate::status::Code::Unknown, "request wrong".to_string()) @@ -223,11 +188,12 @@ where Ok(v) => v.into_http().into_parts(), Err(err) => return err.to_http(), }; - let resp_body = encode_server(encoder, resp_body, compression, true); + let resp_body = encode_server(self.codec.encoder(), resp_body, compression); - parts - .headers - .insert(http::header::CONTENT_TYPE, content_type); + parts.headers.insert( + http::header::CONTENT_TYPE, + http::HeaderValue::from_static("application/grpc"), + ); if let Some(encoding) = accept_encoding { parts .headers @@ -243,7 +209,7 @@ where req: http::Request, ) -> http::Response where - S: UnarySvc, + S: UnarySvc, B: Body + Send + 'static, B::Error: Into + Send, { @@ -251,24 +217,13 @@ where if self.compression.is_none() || accept_encoding.is_none() { accept_encoding = None; } + let compression = match self.get_encoding_from_req(req.headers()) { Ok(val) => val, Err(status) => return status.to_http(), }; - let content_type = req - .headers() - .get("content-type") - .cloned() - .unwrap_or(HeaderValue::from_str("application/grpc+proto").unwrap()); - let content_type_str = content_type.to_str().unwrap(); - //Determine whether to use the gRPC mode to handle request data - let handle_request_as_grpc = content_type_str.contains("grpc"); - let (decoder, encoder): ( - Box + Send + 'static>, - Box + Send + 'static>, - ) = get_codec(content_type_str); - let req_stream = - req.map(|body| Decoding::new(body, decoder, compression, handle_request_as_grpc)); + + let req_stream = req.map(|body| Decoding::new(body, self.codec.decoder(), compression)); let 
(parts, mut body) = Request::from_http(req_stream).into_parts(); let msg = body.try_next().await.unwrap().ok_or_else(|| { crate::status::Status::new(crate::status::Code::Unknown, "request wrong".to_string()) @@ -285,15 +240,15 @@ where Err(err) => return err.to_http(), }; let resp_body = encode_server( - encoder, + self.codec.encoder(), stream::once(future::ready(resp_body)).map(Ok).into_stream(), accept_encoding, - handle_request_as_grpc, ); - parts - .headers - .insert(http::header::CONTENT_TYPE, content_type); + parts.headers.insert( + http::header::CONTENT_TYPE, + http::HeaderValue::from_static("application/grpc"), + ); if let Some(encoding) = accept_encoding { parts .headers @@ -327,7 +282,7 @@ where } } -impl BusinessConfig for TripleServer { +impl BusinessConfig for TripleServer { fn init() -> Self { todo!() } diff --git a/dubbo/src/triple/transport/connection.rs b/dubbo/src/triple/transport/connection.rs index 10a879a5..cb0b9d71 100644 --- a/dubbo/src/triple/transport/connection.rs +++ b/dubbo/src/triple/transport/connection.rs @@ -15,19 +15,24 @@ * limitations under the License. */ -use std::task::Poll; - -use dubbo_logger::tracing::debug; use hyper::client::{conn::Builder, service::Connect}; use tower_service::Service; -use crate::{boxed, triple::transport::connector::get_connector}; +use crate::{ + boxed, invoker::clone_body::CloneBody, triple::transport::connector::get_connector, StdError, +}; + +type HyperConnect = Connect< + crate::utils::boxed_clone::BoxCloneService, + CloneBody, + http::Uri, +>; -#[derive(Debug, Clone)] pub struct Connection { host: hyper::Uri, - connector: String, + connector: &'static str, builder: Builder, + connect: Option, } impl Default for Connection { @@ -40,12 +45,13 @@ impl Connection { pub fn new() -> Self { Connection { host: hyper::Uri::default(), - connector: "http".to_string(), + connector: "http", builder: Builder::new(), + connect: None, } } - pub fn with_connector(mut self, connector: String) -> Self { + pub fn with_connector(mut self, connector: &'static str) -> Self { self.connector = connector; self } @@ -59,14 +65,16 @@ impl Connection { self.builder = builder; self } + + pub fn build(mut self) -> Self { + let builder = self.builder.clone().http2_only(true).to_owned(); + let hyper_connect: HyperConnect = Connect::new(get_connector(self.connector), builder); + self.connect = Some(hyper_connect); + self + } } -impl Service> for Connection -where - ReqBody: http_body::Body + Unpin + Send + 'static, - ReqBody::Data: Send + Unpin, - ReqBody::Error: Into, -{ +impl Service> for Connection { type Response = http::Response; type Error = crate::Error; @@ -75,25 +83,34 @@ where fn poll_ready( &mut self, - _cx: &mut std::task::Context<'_>, + cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { - Poll::Ready(Ok(())) + match self.connect { + None => { + panic!("connection must be built before use") + } + Some(ref mut connect) => connect.poll_ready(cx).map_err(|e| e.into()), + } } - fn call(&mut self, req: http::Request) -> Self::Future { - let builder = self.builder.clone().http2_only(true).to_owned(); - let mut connector = Connect::new(get_connector(self.connector.as_str()), builder); - let uri = self.host.clone(); - let fut = async move { - debug!("send base call to {}", uri); - let mut con = connector.call(uri).await.unwrap(); - - con.call(req) - .await - .map_err(|err| err.into()) - .map(|res| res.map(boxed)) - }; - - Box::pin(fut) + fn call(&mut self, req: http::Request) -> Self::Future { + match self.connect { + None => { + 
panic!("connection must be built before use") + } + Some(ref mut connect) => { + let uri = self.host.clone(); + let call_fut = connect.call(uri); + let fut = async move { + let mut con = call_fut.await.unwrap(); + con.call(req) + .await + .map_err(|err| err.into()) + .map(|res| res.map(boxed)) + }; + + return Box::pin(fut); + } + } } } diff --git a/dubbo/src/triple/transport/connector/https_connector.rs b/dubbo/src/triple/transport/connector/https_connector.rs deleted file mode 100644 index eb217cb6..00000000 --- a/dubbo/src/triple/transport/connector/https_connector.rs +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use std::{ - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - str::FromStr, - sync::Arc, -}; - -use dubbo_logger::tracing; -use http::Uri; -use hyper::client::connect::dns::Name; -use rustls_native_certs::load_native_certs; -use tokio::net::TcpStream; -use tokio_rustls::{ - client::TlsStream, - rustls::{self}, - TlsConnector as TlsConnectorTokio, -}; -use tower_service::Service; - -use crate::triple::transport::resolver::{dns::DnsResolver, Resolve}; - -#[derive(Clone, Default)] -pub struct HttpsConnector { - resolver: R, -} - -impl HttpsConnector { - pub fn new() -> Self { - Self { - resolver: DnsResolver::default(), - } - } -} - -impl HttpsConnector { - pub fn new_with_resolver(resolver: R) -> HttpsConnector { - Self { resolver } - } -} - -impl Service for HttpsConnector -where - R: Resolve + Clone + Send + Sync + 'static, - R::Future: Send, -{ - type Response = TlsStream; - - type Error = crate::Error; - - type Future = crate::BoxFuture; - - fn poll_ready( - &mut self, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - self.resolver.poll_ready(cx).map_err(|err| err.into()) - } - - fn call(&mut self, uri: Uri) -> Self::Future { - let mut inner = self.clone(); - - Box::pin(async move { inner.call_async(uri).await }) - } -} - -impl HttpsConnector -where - R: Resolve + Send + Sync + 'static, -{ - async fn call_async(&mut self, uri: Uri) -> Result, crate::Error> { - let host = uri.host().unwrap(); - let port = uri.port_u16().unwrap(); - - let addr = if let Ok(addr) = host.parse::() { - tracing::info!("host is ip address: {:?}", host); - SocketAddr::V4(SocketAddrV4::new(addr, port)) - } else { - tracing::info!("host is dns: {:?}", host); - let addrs = self - .resolver - .resolve(Name::from_str(host).unwrap()) - .await - .map_err(|err| err.into())?; - let addrs: Vec = addrs - .map(|mut addr| { - addr.set_port(port); - addr - }) - .collect(); - addrs[0] - }; - - let mut root_store = rustls::RootCertStore::empty(); - - for cert in load_native_certs()? 
{ - root_store.add(&rustls::Certificate(cert.0))?; - } - - let config = rustls::ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(root_store) - .with_no_client_auth(); - - let connector = TlsConnectorTokio::from(Arc::new(config)); - - let stream = TcpStream::connect(&addr).await?; - let domain = rustls::ServerName::try_from(host).map_err(|err| { - crate::status::Status::new(crate::status::Code::Internal, err.to_string()) - })?; - let stream = connector.connect(domain, stream).await?; - - Ok(stream) - } -} diff --git a/dubbo/src/triple/transport/connector/mod.rs b/dubbo/src/triple/transport/connector/mod.rs index 690b781a..703201ee 100644 --- a/dubbo/src/triple/transport/connector/mod.rs +++ b/dubbo/src/triple/transport/connector/mod.rs @@ -16,7 +16,6 @@ */ pub mod http_connector; -pub mod https_connector; #[cfg(any(target_os = "macos", target_os = "unix"))] pub mod unix_connector; @@ -74,16 +73,12 @@ where } } -pub fn get_connector(connector: &str) -> BoxCloneService { +pub fn get_connector(connector: &'static str) -> BoxCloneService { match connector { "http" => { let c = http_connector::HttpConnector::new(); BoxCloneService::new(Connector::new(c)) } - "https" => { - let c = https_connector::HttpsConnector::new(); - BoxCloneService::new(Connector::new(c)) - } #[cfg(any(target_os = "macos", target_os = "unix"))] "unix" => { let c = unix_connector::UnixConnector::new(); diff --git a/dubbo/src/triple/triple_wrapper.rs b/dubbo/src/triple/triple_wrapper.rs new file mode 100644 index 00000000..d898e93e --- /dev/null +++ b/dubbo/src/triple/triple_wrapper.rs @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use prost::Message; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TripleRequestWrapper { + /// hessian4 + /// json + #[prost(string, tag = "1")] + pub serialize_type: ::prost::alloc::string::String, + #[prost(bytes = "vec", repeated, tag = "2")] + pub args: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>, + #[prost(string, repeated, tag = "3")] + pub arg_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TripleResponseWrapper { + #[prost(string, tag = "1")] + pub serialize_type: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "2")] + pub data: ::prost::alloc::vec::Vec<u8>, + #[prost(string, tag = "3")] + pub r#type: ::prost::alloc::string::String, +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TripleExceptionWrapper { + #[prost(string, tag = "1")] + pub language: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub serialization: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub class_name: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "4")] + pub data: ::prost::alloc::vec::Vec<u8>, +} + +impl TripleRequestWrapper { + pub fn new(data: Vec<String>) -> Self { + let mut trip = TripleRequestWrapper::default(); + trip.serialize_type = "fastjson".to_string(); + trip.args = data.iter().map(|e| e.as_bytes().to_vec()).collect(); + return trip; + } + pub fn get_req(self) -> Vec<String> { + let mut res = vec![]; + for str in self.args { + res.push(String::from_utf8(str).unwrap()); + } + return res; + } +} + +impl TripleResponseWrapper { + pub fn new(data: String) -> TripleResponseWrapper { + let mut trip = TripleResponseWrapper::default(); + trip.serialize_type = "fastjson".to_string(); + trip.data = data.as_bytes().to_vec(); + return trip; + } +} diff --git a/dubbo/src/utils/tls.rs b/dubbo/src/utils/tls.rs index aaf59c94..0072bf24 100644 --- a/dubbo/src/utils/tls.rs +++ b/dubbo/src/utils/tls.rs @@ -15,10 +15,10 @@ * limitations under the License.
*/ -use rustls_pemfile::{certs, ec_private_keys, pkcs8_private_keys, rsa_private_keys}; +use rustls_pemfile::{certs, rsa_private_keys}; use std::{ fs::File, - io::{self, BufReader, Cursor, Read}, + io::{self, BufReader}, path::Path, }; use tokio_rustls::rustls::{Certificate, PrivateKey}; @@ -30,22 +30,7 @@ pub fn load_certs(path: &Path) -> io::Result> { } pub fn load_keys(path: &Path) -> io::Result> { - let file = &mut BufReader::new(File::open(path)?); - let mut data = Vec::new(); - file.read_to_end(&mut data)?; - - let mut cursor = Cursor::new(data); - - let parsers = [rsa_private_keys, pkcs8_private_keys, ec_private_keys]; - - for parser in &parsers { - if let Ok(mut key) = parser(&mut cursor) { - if !key.is_empty() { - return Ok(key.drain(..).map(PrivateKey).collect()); - } - } - cursor.set_position(0); - } - - Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")) + rsa_private_keys(&mut BufReader::new(File::open(path)?)) + .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid key")) + .map(|mut keys| keys.drain(..).map(PrivateKey).collect()) } diff --git a/examples/echo/Cargo.toml b/examples/echo/Cargo.toml index 0ad345bc..bc6638b8 100644 --- a/examples/echo/Cargo.toml +++ b/examples/echo/Cargo.toml @@ -19,24 +19,13 @@ path = "src/echo/server.rs" name = "echo-client" path = "src/echo/client.rs" -[[bin]] -name = "echo-tls-server" -path = "src/echo-tls/server.rs" - -[[bin]] -name = "echo-tls-client" -path = "src/echo-tls/client.rs" - [dependencies] http = "0.2" http-body = "0.4.4" futures-util = {version = "0.3", default-features = false} tokio = { version = "1.0", features = [ "rt-multi-thread", "time", "fs", "macros", "net", "signal"] } -prost-derive = {version = "0.11.9", optional = true} -prost = "0.11.9" -prost-serde = "0.3.0" -prost-serde-derive = "0.1.2" -serde = { version = "1.0.171",features = ["derive"] } +prost-derive = {version = "0.10", optional = true} +prost = "0.10.4" async-trait = "0.1.56" tokio-stream = "0.1" dubbo-logger.workspace=true diff --git a/examples/echo/README.md b/examples/echo/README.md index 14868718..e2f73794 100644 --- a/examples/echo/README.md +++ b/examples/echo/README.md @@ -15,21 +15,3 @@ reply: EchoResponse { message: "msg1 from server" } reply: EchoResponse { message: "msg2 from server" } reply: EchoResponse { message: "msg3 from server" } ``` - -## build and run `echo-tls` - -**Please first install the `ca.crt` certificate file under the `fixtures` path to the platform's native certificate store.** - -```sh -$ cd github.com/apache/dubbo-rust/examples/echo-tls/ -$ cargo build - -$ # run sever -$ ../../target/debug/echo-tls-server - -$ # run client -$ ../../target/debug/echo-tls-client -reply: EchoResponse { message: "msg1 from tls-server" } -reply: EchoResponse { message: "msg2 from tls-server" } -reply: EchoResponse { message: "msg3 from tls-server" } -``` diff --git a/examples/echo/README_CN.md b/examples/echo/README_CN.md index a37862c6..b52cc1c4 100644 --- a/examples/echo/README_CN.md +++ b/examples/echo/README_CN.md @@ -15,21 +15,3 @@ reply: EchoResponse { message: "msg1 from server" } reply: EchoResponse { message: "msg2 from server" } reply: EchoResponse { message: "msg3 from server" } ``` - -## 构建并运行`echo-tls` - -**请先将`fixtures`路径下的`ca.crt`证书文件安装到系统信任根证书中.** - -```sh -$ cd github.com/apache/dubbo-rust/examples/echo-tls/ -$ cargo build - -$ # 运行服务端 -$ ../../target/debug/echo-tls-server - -$ # 运行客户端 -$ ../../target/debug/echo-tls-client -reply: EchoResponse { message: "msg1 from tls-server" } -reply: EchoResponse { 
message: "msg2 from tls-server" } -reply: EchoResponse { message: "msg3 from tls-server" } -``` \ No newline at end of file diff --git a/examples/echo/fixtures/ca.crt b/examples/echo/fixtures/ca.crt deleted file mode 100644 index ea2b4858..00000000 --- a/examples/echo/fixtures/ca.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDiTCCAnGgAwIBAgIUGJWxrMGe9qRZzAfd5w4XIT3lkcEwDQYJKoZIhvcNAQEL -BQAwVDEVMBMGA1UEAwwMVGVzdCBSb290IENBMQswCQYDVQQGEwJVUzENMAsGA1UE -CAwEVGVzdDENMAsGA1UEBwwEVGVzdDEQMA4GA1UECgwHT3BlbmRhbDAeFw0yMzA4 -MTQxMTEzMzRaFw0yNDA4MTMxMTEzMzRaMFQxFTATBgNVBAMMDFRlc3QgUm9vdCBD -QTELMAkGA1UEBhMCVVMxDTALBgNVBAgMBFRlc3QxDTALBgNVBAcMBFRlc3QxEDAO -BgNVBAoMB09wZW5kYWwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCt -UF6mcDgSyH5t78XnJusvQxsUfv2XydHtvLcIpwkCkIuIj7nF2WH064Gv12x+y42W -mb+5z6JTgHRMRqcyQM8q4PQFrKvxPX8R2Limd7VLBJzYjR7Ma7JIrDohLnfywxUP -19P5SzaGiro+ZK3t3xCnmtHcYoM+An0mQdKyVV7ytzAfg1PqkfDme19I28fH8cOP -tF+RU8/LEHnte519O1bawx7xNdPsyykMrFij02o1VUeum2K9Wya8xHDixokveYDW -swg5G4Tsy1QfgqFgxAXahIroPIwQvZOGkWVsmPXRXHtHNFG91ntJivv2HBFniUTq -A0UbVdj09T+h+JLc19G9AgMBAAGjUzBRMB0GA1UdDgQWBBQ2672x8uh6Lud0EkjO -wt2aEioeKjAfBgNVHSMEGDAWgBQ2672x8uh6Lud0EkjOwt2aEioeKjAPBgNVHRMB -Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCQkLp3GzZOXXXOKiMF6Iev1OUW -w1jr7hVdJHOVGNCD6uZLuwSXJOWnEP8+hp8WvMl7SQAPpVYsTjdqhLATLaAZDucG -sDq6oUTh/v8QVIBm0qF8+iMU8XZfgoeKuY13RXs23hneMAPQ5rcPwQhQEQkkqUvi -Fq8qYFVd5mEr6Z62DT0s544WaBrpHr37mHOv0hIkHtX7Dy2Juc23MYw+W4PSD4fm -sr1kARwHtY1meX+H3iRsX+7juTa33v+7H4IivhcPobIxFp+Hs9R5mx5u80wKMjVv -t3STmB4nE7pABzucrjkSo43jIUwYN4rwydlSma9VkzvY6ry86HQuemycRb9H ------END CERTIFICATE----- diff --git a/examples/echo/fixtures/server.crt b/examples/echo/fixtures/server.crt deleted file mode 100644 index 6788eb4c..00000000 --- a/examples/echo/fixtures/server.crt +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID1zCCAr+gAwIBAgIUJKTfvASV+RnwF7oLO84HZJDyvLwwDQYJKoZIhvcNAQEL -BQAwVDEVMBMGA1UEAwwMVGVzdCBSb290IENBMQswCQYDVQQGEwJVUzENMAsGA1UE -CAwEVGVzdDENMAsGA1UEBwwEVGVzdDEQMA4GA1UECgwHT3BlbmRhbDAeFw0yMzA4 -MTQxNDU5MzZaFw0yNDA2MDkxNDU5MzZaMFkxGjAYBgNVBAMMEVJlZGlzIGNlcnRp -ZmljYXRlMQswCQYDVQQGEwJVUzENMAsGA1UECAwEVGVzdDENMAsGA1UEBwwEVGVz -dDEQMA4GA1UECgwHT3BlbmRhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAIwREKDrRgZ2jlR3tpLHvMiW8JDu4JiLBxyrlJJE5ndhuH7MEgwz8HnXvxbD -eyuamzkAzQIvqfVFVTRuVEYyEtoGzIegDL76H9ybuMGhKBK1m0TmiH7bOsAVMqZN -vDtQJiw8qePtSq3G3H7Sw+/oudrJIc/f7kDox/lndKHTBmLbjSrvpkOJk2qnvhPJ -ih4SuLNiW+tHv4sUdYBXXxn2wLHXNLGrlpeW28jtWGfu2noRCzikOYL/jwg2xzXV -cBSuFwQ3swLDG/htqpePVA/sLxbXTt03A8fCajYcKiJdW88gqw4dW01ya8rCr5MU -1C7lPwNCB8qNn8pdkmrh/Oc0zDsCAwEAAaOBmzCBmDAfBgNVHSMEGDAWgBQ2672x -8uh6Lud0EkjOwt2aEioeKjAJBgNVHRMEAjAAMAsGA1UdDwQEAwIE8DA+BgNVHREE -NzA1gglsb2NhbGhvc3SHBH8AAAGHBKweAAKHBKweAAOHBKweAASHBKweAAWHBKwe -AAaHBKweAAcwHQYDVR0OBBYEFGvNF07RBwyi3tbpFIJtvWhXAGblMA0GCSqGSIb3 -DQEBCwUAA4IBAQAd57+0YXfg8eIe2UkqLshIEonoIpKhmsIpRJyXLOUWYaSHri4w -aDqPjogA39w34UcZsumfSReWBGrCyBroSCqQZOM166tw79+AVdjHHtgNm8pFRhO7 -0vnFdAU30TOQP+mRF3mXz3hcK68U/4cRhXC5jXq8YRLiAG74G3PmXmmk2phtluEL -SLLCvF5pCz3EaYsEKP+ZQpdY3BLp6Me7XDpGWPuNYVwVTJwwM9CLjQ8pxMlz1O1x -HVN7xGtLz4dw9nEqnmjYBvH8aum+iAQPiHVuGfQfqIea28XeuyV4c5TL2b+OUsLY -BRhX+z5OkGHXcMc1QDKo3PZcs8C1w8SC1x9D ------END CERTIFICATE----- diff --git a/examples/echo/fixtures/server.key b/examples/echo/fixtures/server.key deleted file mode 100644 index ad53eac5..00000000 --- a/examples/echo/fixtures/server.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCMERCg60YGdo5U 
-d7aSx7zIlvCQ7uCYiwccq5SSROZ3Ybh+zBIMM/B5178Ww3srmps5AM0CL6n1RVU0 -blRGMhLaBsyHoAy++h/cm7jBoSgStZtE5oh+2zrAFTKmTbw7UCYsPKnj7Uqtxtx+ -0sPv6LnaySHP3+5A6Mf5Z3Sh0wZi240q76ZDiZNqp74TyYoeErizYlvrR7+LFHWA -V18Z9sCx1zSxq5aXltvI7Vhn7tp6EQs4pDmC/48INsc11XAUrhcEN7MCwxv4baqX -j1QP7C8W107dNwPHwmo2HCoiXVvPIKsOHVtNcmvKwq+TFNQu5T8DQgfKjZ/KXZJq -4fznNMw7AgMBAAECggEAA5l0ABv7s90mbDTwBqvxBdtHJFpXKuRhEui1NosPEctQ -6+/qQ3uu4YVcqO+YweFFEufFMkSvopjHse4R5q87vR7xRkej0Yvo914zFxrBRmB6 -CdZoyeXFXTv442gvqaXgzUCOgcfOeafDcSjmayBjwk5qkDEqKhXb/w9HDS+N7vVk -BU+b/lMzQbGWb/oc3pHmEYXqFR+sFkHM2nCWBvQ1hRX4TVaeZpUWH7RBE95z87ug -F21yqEjQfaTh2cidKXWtnozxIv2XgUncY40njdhRRzyJqWIW4CEdxIAX0IWT0z2+ -4L59DoNyYaimnaaSmNDj5WgDgL7tlTziXBJfBTpqDQKBgQDAe6Tf49eZINZ6bxHC -RDzFSKikBOvC9rkhGOzD6JBALPbdWH9HnnH4cT5F4b5y96rPEqzO1+RYQdIF4GDx -NYafMx8Nht0j0WWJLkygUCa8guqaaFczaquaIHQ2YpzzhpLlmAdEz1Jrsk2LfM2Q -58b7JHb+Aq/+UAIuBhL7FlRf7QKBgQC6SXR6opkjUfkrsWcQiB5CJDk7zuL8G+Ks -Jle2S1TzFdBL5rNnVttC7yYmZIP7qN6zDtsbDxqW5gEeDmyGDixLeQ0kQ/oPU2/y -lPr9rHx+BUWEGyDiCfG21Y6R/jdfrA4R5T8vPmJXOnnppXZ5Gqi1X0aHprbLYWhz -HpkvBaHHxwKBgAGx1PzHo8FMYbcIPU7JjQNrpVh0VqMLywt4jbUX2hVGkBHY0p4N -zhES5ip1V1jpx041auITUoZYZgH5PMFC6GGEcLSMyGulT1CK4M/UhNLKEEi1vHbO -bJ5ZxMwpyBn4yFhPI1k+vgoGstoUijbJY54YbxfDbEs/5xUCpq4hPzLtAoGAZVBr -3AKwnMgJZxz9u7z8D+bZhdCYHJsh5ZSY4ZkI44f6mD0pV0uixj2AlyLVsTn/nIy4 -13eYc3c2Jl2b4jC1IHr+jbm2tz0exmUGOI7lyjgdvaJveOAFqPVuq7IB9bOCl3MB -sTURkPVJtqv5yhWYqcPefQpLokMg5nM+xpcejKMCgYEAqv5gj3ez0HmLv/9k86Zs -8/780lNcYnB1dQYNJ7g3T6wu8WVGNtzOdPXGTMX9sbv9Smq0cZLZKNMtXDsc5aJT -5yzysPkDxqSK4vJmng74aHUI2HW+HvPqWLZnXC0IYGFvN8KyVkdp/FyhQMNMp6ip -5rgp5RpXJk5MhvvlYdZrz5Q= ------END PRIVATE KEY----- diff --git a/examples/echo/src/echo-tls/client.rs b/examples/echo/src/echo-tls/client.rs deleted file mode 100644 index 70e4e8f0..00000000 --- a/examples/echo/src/echo-tls/client.rs +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -use dubbo::codegen::*; -use example_echo::generated::generated::{echo_client::EchoClient, EchoRequest}; -use futures_util::StreamExt; - -pub struct FakeFilter {} - -impl Filter for FakeFilter { - fn call(&mut self, req: Request<()>) -> Result, dubbo::status::Status> { - println!("fake filter: {:?}", req.metadata); - Ok(req) - } -} - -#[tokio::main] -async fn main() { - dubbo_logger::init(); - - let builder = ClientBuilder::from_static(&"https://127.0.0.1:8889").with_timeout(1000000); - let mut cli = EchoClient::new(builder); - - let resp = cli - .unary_echo(Request::new(EchoRequest { - message: "message from tls-client".to_string(), - })) - .await; - let resp = match resp { - Ok(resp) => resp, - Err(err) => return println!("{:?}", err), - }; - let (_parts, body) = resp.into_parts(); - println!("Response: {:?}", body); - - let data = vec![ - EchoRequest { - message: "msg1 from tls-client streaming".to_string(), - }, - EchoRequest { - message: "msg2 from tls-client streaming".to_string(), - }, - EchoRequest { - message: "msg3 from tls-client streaming".to_string(), - }, - ]; - let req = futures_util::stream::iter(data); - let resp = cli.client_streaming_echo(req).await; - let client_streaming_resp = match resp { - Ok(resp) => resp, - Err(err) => return println!("{:?}", err), - }; - let (_parts, resp_body) = client_streaming_resp.into_parts(); - println!("tls-client streaming, Response: {:?}", resp_body); - - let data = vec![ - EchoRequest { - message: "msg1 from tls-client".to_string(), - }, - EchoRequest { - message: "msg2 from tls-client".to_string(), - }, - EchoRequest { - message: "msg3 from tls-client".to_string(), - }, - ]; - let req = futures_util::stream::iter(data); - - let bidi_resp = cli.bidirectional_streaming_echo(req).await.unwrap(); - - let (parts, mut body) = bidi_resp.into_parts(); - println!("parts: {:?}", parts); - while let Some(item) = body.next().await { - match item { - Ok(v) => { - println!("reply: {:?}", v); - } - Err(err) => { - println!("err: {:?}", err); - } - } - } - let trailer = body.trailer().await.unwrap(); - println!("trailer: {:?}", trailer); - - let resp = cli - .server_streaming_echo(Request::new(EchoRequest { - message: "server streaming req".to_string(), - })) - .await - .unwrap(); - - let (parts, mut body) = resp.into_parts(); - println!("parts: {:?}", parts); - while let Some(item) = body.next().await { - match item { - Ok(v) => { - println!("reply: {:?}", v); - } - Err(err) => { - println!("err: {:?}", err); - } - } - } - let trailer = body.trailer().await.unwrap(); - println!("trailer: {:?}", trailer); -} diff --git a/examples/echo/src/echo-tls/server.rs b/examples/echo/src/echo-tls/server.rs deleted file mode 100644 index c7bcd153..00000000 --- a/examples/echo/src/echo-tls/server.rs +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use std::{io::ErrorKind, pin::Pin}; - -use async_trait::async_trait; -use dubbo::filter::{context::ContextFilter, timeout::TimeoutFilter}; -use futures_util::{Stream, StreamExt}; -use tokio::sync::mpsc; -use tokio_stream::wrappers::ReceiverStream; - -use dubbo::codegen::*; -use example_echo::generated::generated::{ - echo_server::{register_server, Echo, EchoServer}, - EchoRequest, EchoResponse, -}; - -type ResponseStream = - Pin> + Send>>; - -#[derive(Clone)] -pub struct FakeFilter {} - -impl Filter for FakeFilter { - fn call(&mut self, req: Request<()>) -> Result, dubbo::status::Status> { - println!("server fake filter: {:?}", req.metadata); - Ok(req) - } -} - -#[tokio::main] -async fn main() { - dubbo_logger::init(); - register_server(EchoServerImpl { - name: "echo".to_string(), - }); - let server = EchoServerImpl::default(); - let s = EchoServer::::with_filter(server, FakeFilter {}); - let timeout_filter = FilterService::new(s, TimeoutFilter {}); - let context_filter = FilterService::new(timeout_filter, ContextFilter {}); - - dubbo::protocol::triple::TRIPLE_SERVICES - .write() - .unwrap() - .insert( - "grpc.examples.echo.Echo".to_string(), - dubbo::utils::boxed_clone::BoxCloneService::new(context_filter), - ); - - let builder = ServerBuilder::new() - .with_listener("tcp".to_string()) - .with_tls("fixtures/server.crt", "fixtures/server.key") - .with_service_names(vec!["grpc.examples.echo.Echo".to_string()]) - .with_addr("127.0.0.1:8889"); - builder.build().serve().await.unwrap(); -} - -#[allow(dead_code)] -#[derive(Default, Clone)] -struct EchoServerImpl { - name: String, -} - -// #[async_trait] -#[async_trait] -impl Echo for EchoServerImpl { - async fn unary_echo( - &self, - req: Request, - ) -> Result, dubbo::status::Status> { - println!("EchoServer::hello {:?}", req.metadata); - - Ok(Response::new(EchoResponse { - message: "hello, dubbo-rust".to_string(), - })) - } - - type ServerStreamingEchoStream = ResponseStream; - - async fn server_streaming_echo( - &self, - req: Request, - ) -> Result, dubbo::status::Status> { - println!("server_streaming_echo: {:?}", req.into_inner()); - - let data = vec![ - Result::<_, dubbo::status::Status>::Ok(EchoResponse { - message: "msg1 from tls-server".to_string(), - }), - Result::<_, dubbo::status::Status>::Ok(EchoResponse { - message: "msg2 from tls-server".to_string(), - }), - Result::<_, dubbo::status::Status>::Ok(EchoResponse { - message: "msg3 from tls-server".to_string(), - }), - ]; - let resp = futures_util::stream::iter(data); - - Ok(Response::new(Box::pin(resp))) - } - async fn client_streaming_echo( - &self, - req: Request>, - ) -> Result, dubbo::status::Status> { - let mut s = req.into_inner(); - loop { - let result = s.next().await; - match result { - Some(Ok(val)) => println!("result: {:?}", val), - Some(Err(val)) => println!("err: {:?}", val), - None => break, - } - } - Ok(Response::new(EchoResponse { - message: "hello tls-client streaming".to_string(), - })) - } - - type BidirectionalStreamingEchoStream = ResponseStream; - - async fn bidirectional_streaming_echo( - &self, - request: Request>, - ) -> Result, dubbo::status::Status> { - println!( - "EchoServer::bidirectional_streaming_echo, grpc header: {:?}", - request.metadata - ); - - let mut in_stream = request.into_inner(); - let (tx, rx) = mpsc::channel(128); - - // this spawn here is required if you want to handle connection error. 
- // If we just map `in_stream` and write it back as `out_stream` the `out_stream` - // will be drooped when connection error occurs and error will never be propagated - // to mapped version of `in_stream`. - tokio::spawn(async move { - while let Some(result) = in_stream.next().await { - match result { - Ok(v) => { - // if v.name.starts_with("msg2") { - // tx.send(Err(dubbo::status::Status::internal(format!("err: args is invalid, {:?}", v.name)) - // )).await.expect("working rx"); - // continue; - // } - tx.send(Ok(EchoResponse { - message: format!("server reply: {:?}", v.message), - })) - .await - .expect("working rx") - } - Err(err) => { - if let Some(io_err) = match_for_io_error(&err) { - if io_err.kind() == ErrorKind::BrokenPipe { - // here you can handle special case when client - // disconnected in unexpected way - eprintln!("\tclient disconnected: broken pipe"); - break; - } - } - - match tx.send(Err(err)).await { - Ok(_) => (), - Err(_err) => break, // response was droped - } - } - } - } - println!("\tstream ended"); - }); - - // echo just write the same data that was received - let out_stream = ReceiverStream::new(rx); - - Ok(Response::new( - Box::pin(out_stream) as Self::BidirectionalStreamingEchoStream - )) - } -} - -fn match_for_io_error(err_status: &dubbo::status::Status) -> Option<&std::io::Error> { - let mut err: &(dyn std::error::Error + 'static) = err_status; - - loop { - if let Some(io_err) = err.downcast_ref::() { - return Some(io_err); - } - - err = match err.source() { - Some(err) => err, - None => return None, - }; - } -} diff --git a/examples/echo/src/echo/client.rs b/examples/echo/src/echo/client.rs index 1a210007..0a2f150b 100644 --- a/examples/echo/src/echo/client.rs +++ b/examples/echo/src/echo/client.rs @@ -71,7 +71,7 @@ async fn main() { }; let (_parts, resp_body) = client_streaming_resp.into_parts(); println!("client streaming, Response: {:?}", resp_body); - // + let data = vec![ EchoRequest { message: "msg1 from client".to_string(), diff --git a/examples/echo/src/generated/grpc.examples.echo.rs b/examples/echo/src/generated/grpc.examples.echo.rs index 0a746559..4521c9f8 100644 --- a/examples/echo/src/generated/grpc.examples.echo.rs +++ b/examples/echo/src/generated/grpc.examples.echo.rs @@ -1,6 +1,4 @@ /// EchoRequest is the request for echo. -#[derive(serde::Serialize, serde::Deserialize)] -#[serde(default)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EchoRequest { @@ -8,8 +6,6 @@ pub struct EchoRequest { pub message: ::prost::alloc::string::String, } /// EchoResponse is the response for echo. -#[derive(serde::Serialize, serde::Deserialize)] -#[serde(default)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EchoResponse { @@ -21,7 +17,7 @@ pub mod echo_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use dubbo::codegen::*; /// Echo is the echo service. 
- #[derive(Debug, Clone, Default)] + #[derive(Clone)] pub struct EchoClient { inner: TripleClient, } @@ -40,50 +36,64 @@ pub mod echo_client { &mut self, request: Request, ) -> Result, dubbo::status::Status> { + let codec = + dubbo::codegen::ProstCodec::::default(); let invocation = RpcInvocation::default() .with_service_unique_name(String::from("grpc.examples.echo.Echo")) .with_method_name(String::from("UnaryEcho")); let path = http::uri::PathAndQuery::from_static("/grpc.examples.echo.Echo/UnaryEcho"); - self.inner.unary(request, path, invocation).await + self.inner.unary(request, codec, path, invocation).await } /// ServerStreamingEcho is server side streaming. pub async fn server_streaming_echo( &mut self, request: Request, ) -> Result>, dubbo::status::Status> { + let codec = + dubbo::codegen::ProstCodec::::default(); let invocation = RpcInvocation::default() .with_service_unique_name(String::from("grpc.examples.echo.Echo")) .with_method_name(String::from("ServerStreamingEcho")); let path = http::uri::PathAndQuery::from_static( "/grpc.examples.echo.Echo/ServerStreamingEcho", ); - self.inner.server_streaming(request, path, invocation).await + self.inner + .server_streaming(request, codec, path, invocation) + .await } /// ClientStreamingEcho is client side streaming. pub async fn client_streaming_echo( &mut self, request: impl IntoStreamingRequest, ) -> Result, dubbo::status::Status> { + let codec = + dubbo::codegen::ProstCodec::::default(); let invocation = RpcInvocation::default() .with_service_unique_name(String::from("grpc.examples.echo.Echo")) .with_method_name(String::from("ClientStreamingEcho")); let path = http::uri::PathAndQuery::from_static( "/grpc.examples.echo.Echo/ClientStreamingEcho", ); - self.inner.client_streaming(request, path, invocation).await + self.inner + .client_streaming(request, codec, path, invocation) + .await } /// BidirectionalStreamingEcho is bidi streaming. 
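The regenerated stubs above now build a `ProstCodec` per call and hand it to `TripleClient` together with the request path and `RpcInvocation`. A minimal sketch of the same unary call made directly, assuming the client-side type order is `<request, response>` (the generated server side below uses `<response, request>`):

```rust
// Sketch only: issue UnaryEcho through TripleClient with an explicit codec,
// mirroring the generated EchoClient::unary_echo above. EchoRequest/EchoResponse
// are the prost messages from this generated file; the TripleClient value is
// assumed to come from a ClientBuilder as elsewhere in the examples.
use dubbo::codegen::*;
use example_echo::generated::generated::{EchoRequest, EchoResponse};

async fn unary_echo_direct(
    mut inner: TripleClient,
    req: EchoRequest,
) -> Result<Response<EchoResponse>, dubbo::status::Status> {
    let codec = ProstCodec::<EchoRequest, EchoResponse>::default();
    let invocation = RpcInvocation::default()
        .with_service_unique_name(String::from("grpc.examples.echo.Echo"))
        .with_method_name(String::from("UnaryEcho"));
    let path = http::uri::PathAndQuery::from_static("/grpc.examples.echo.Echo/UnaryEcho");
    inner.unary(Request::new(req), codec, path, invocation).await
}
```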
pub async fn bidirectional_streaming_echo( &mut self, request: impl IntoStreamingRequest, ) -> Result>, dubbo::status::Status> { + let codec = + dubbo::codegen::ProstCodec::::default(); let invocation = RpcInvocation::default() .with_service_unique_name(String::from("grpc.examples.echo.Echo")) .with_method_name(String::from("BidirectionalStreamingEcho")); let path = http::uri::PathAndQuery::from_static( "/grpc.examples.echo.Echo/BidirectionalStreamingEcho", ); - self.inner.bidi_streaming(request, path, invocation).await + self.inner + .bidi_streaming(request, codec, path, invocation) + .await } } } @@ -172,8 +182,10 @@ pub mod echo_server { } } let fut = async move { - let mut server = - TripleServer::::new(); + let mut server = TripleServer::new(dubbo::codegen::ProstCodec::< + super::EchoResponse, + super::EchoRequest, + >::default()); let res = server.unary(UnaryEchoServer { inner }, req).await; Ok(res) }; @@ -196,8 +208,10 @@ pub mod echo_server { } } let fut = async move { - let mut server = - TripleServer::::new(); + let mut server = TripleServer::new(dubbo::codegen::ProstCodec::< + super::EchoResponse, + super::EchoRequest, + >::default()); let res = server .server_streaming(ServerStreamingEchoServer { inner }, req) .await; @@ -223,8 +237,10 @@ pub mod echo_server { } } let fut = async move { - let mut server = - TripleServer::::new(); + let mut server = TripleServer::new(dubbo::codegen::ProstCodec::< + super::EchoResponse, + super::EchoRequest, + >::default()); let res = server .client_streaming(ClientStreamingEchoServer { inner }, req) .await; @@ -253,8 +269,10 @@ pub mod echo_server { } } let fut = async move { - let mut server = - TripleServer::::new(); + let mut server = TripleServer::new(dubbo::codegen::ProstCodec::< + super::EchoResponse, + super::EchoRequest, + >::default()); let res = server .bidi_streaming(BidirectionalStreamingEchoServer { inner }, req) .await; diff --git a/examples/greeter/Cargo.toml b/examples/greeter/Cargo.toml index 37faf91a..d68cab7b 100644 --- a/examples/greeter/Cargo.toml +++ b/examples/greeter/Cargo.toml @@ -24,11 +24,8 @@ http = "0.2" http-body = "0.4.4" futures-util = { version = "0.3", default-features = false } tokio = { version = "1.0", features = ["rt-multi-thread", "time", "fs", "macros", "net", "signal"] } -prost-derive = { version = "0.11.9", optional = true } -prost = "0.11.9" -serde = { version = "1.0.171",features = ["derive"] } -prost-serde = "0.3.0" -prost-serde-derive = "0.1.2" +prost-derive = { version = "0.10", optional = true } +prost = "0.10.4" async-trait = "0.1.56" tokio-stream = "0.1" dubbo-logger = { path = "../../common/logger" } diff --git a/examples/greeter/src/greeter/client.rs b/examples/greeter/src/greeter/client.rs index eed3e522..cb92ed0b 100644 --- a/examples/greeter/src/greeter/client.rs +++ b/examples/greeter/src/greeter/client.rs @@ -22,33 +22,20 @@ pub mod protos { use std::env; -use dubbo::codegen::*; +use dubbo::{codegen::*, registry::n_registry::ArcRegistry}; use dubbo_base::Url; use futures_util::StreamExt; use protos::{greeter_client::GreeterClient, GreeterRequest}; use registry_nacos::NacosRegistry; -use registry_zookeeper::ZookeeperRegistry; #[tokio::main] async fn main() { dubbo_logger::init(); - let mut builder = ClientBuilder::new(); - - if let Ok(zk_servers) = env::var("ZOOKEEPER_SERVERS") { - let zkr = ZookeeperRegistry::new(&zk_servers); - let directory = RegistryDirectory::new(Box::new(zkr)); - builder = builder.with_directory(Box::new(directory)); - } else if let Ok(nacos_url_str) = 
env::var("NACOS_URL") { - // NACOS_URL=nacos://mse-96efa264-p.nacos-ans.mse.aliyuncs.com - let nacos_url = Url::from_url(&nacos_url_str).unwrap(); - let registry = NacosRegistry::new(nacos_url); - let directory = RegistryDirectory::new(Box::new(registry)); - builder = builder.with_directory(Box::new(directory)); - } else { - builder = builder.with_host("http://127.0.0.1:8888"); - } + let builder = ClientBuilder::new().with_registry(ArcRegistry::new(NacosRegistry::new( + Url::from_url("nacos://127.0.0.1:8848").unwrap(), + ))); let mut cli = GreeterClient::new(builder); @@ -60,7 +47,7 @@ async fn main() { .await; let resp = match resp { Ok(resp) => resp, - Err(err) => return println!("{:?}", err), + Err(err) => return println!("response error: {:?}", err), }; let (_parts, body) = resp.into_parts(); println!("Response: {:?}", body); diff --git a/examples/greeter/src/greeter/server.rs b/examples/greeter/src/greeter/server.rs index f143e25b..fd436e52 100644 --- a/examples/greeter/src/greeter/server.rs +++ b/examples/greeter/src/greeter/server.rs @@ -18,11 +18,13 @@ use std::{io::ErrorKind, pin::Pin}; use async_trait::async_trait; +use dubbo_base::Url; use futures_util::{Stream, StreamExt}; +use registry_nacos::NacosRegistry; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; -use dubbo::{codegen::*, Dubbo}; +use dubbo::{codegen::*, registry::n_registry::ArcRegistry, Dubbo}; use dubbo_config::RootConfig; use dubbo_logger::{ tracing::{info, span}, @@ -50,15 +52,18 @@ async fn main() { register_server(GreeterServerImpl { name: "greeter".to_string(), }); - let zkr = ZookeeperRegistry::default(); + // let zkr: ZookeeperRegistry = ZookeeperRegistry::default(); let r = RootConfig::new(); let r = match r.load() { Ok(config) => config, Err(_err) => panic!("err: {:?}", _err), // response was droped }; + + let nacos_registry = NacosRegistry::new(Url::from_url("nacos://127.0.0.1:8848").unwrap()); let mut f = Dubbo::new() .with_config(r) - .add_registry("zookeeper", Box::new(zkr)); + .add_registry("nacos-registry", ArcRegistry::new(nacos_registry)); + f.start().await; } @@ -76,7 +81,7 @@ impl Greeter for GreeterServerImpl { request: Request, ) -> Result, dubbo::status::Status> { info!("GreeterServer::greet {:?}", request.metadata); - println!("{:?}", request.into_inner()); + Ok(Response::new(GreeterReply { message: "hello, dubbo-rust".to_string(), })) diff --git a/examples/interface/Cargo.toml b/examples/interface/Cargo.toml new file mode 100644 index 00000000..4db6d7c3 --- /dev/null +++ b/examples/interface/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "example-interface" +version = "0.3.0" +edition = "2021" +license = "Apache-2.0" +description = "dubbo-rust-examples-interface" +repository = "https://github.com/apache/dubbo-rust.git" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[package.metadata.release] +release = false + +[[bin]] +name = "interface-server" +path = "src/server.rs" + +[[bin]] +name = "interface-client" +path = "src/client.rs" + +[dependencies] +http = "0.2" +http-body = "0.4.4" +futures-util = {version = "0.3", default-features = false} +tokio = { version = "1.0", features = [ "rt-multi-thread", "time", "fs", "macros", "net", "signal"] } +prost-derive = {version = "0.10", optional = true} +prost = "0.10.4" +async-trait = "0.1.56" +tokio-stream = "0.1" +dubbo-logger.workspace=true + +dubbo-base = { path = "../../common/base"} +dubbo-macro = { path = "../../common/macro", version = "0.3.0" } +dubbo = {path = 
"../../dubbo"} +dubbo-config = {path = "../../config", version = "0.3.0" } +registry-zookeeper.workspace=true +registry-nacos.workspace = true +serde = { version = "1.0.196", features = ["derive"] } +serde_json = "1" +url = "2.5.0" diff --git a/examples/interface/LICENSE b/examples/interface/LICENSE new file mode 100644 index 00000000..75b52484 --- /dev/null +++ b/examples/interface/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/examples/interface/README.md b/examples/interface/README.md
new file mode 100644
index 00000000..175c55fb
--- /dev/null
+++ b/examples/interface/README.md
@@ -0,0 +1,22 @@
+# Apache Dubbo-rust example - interface
+
+## build and run
+
+```sh
+$ cd github.com/apache/dubbo-rust/examples/interface/
+$ cargo build
+
+$ # run server
+$ ../../target/debug/interface-server
+
+$ # run client
+$ ../../target/debug/interface-client
+
+# client stream
+server response : Ok("Hello world1")
+server response : Ok(ResDto { str: "Hello world2:world3 V2" })
+
+# server stream
+client request : "world1"
+client request : ReqDto { str: "world2" } : ReqDto { str: "world3" }
+```
diff --git a/examples/interface/README_CN.md b/examples/interface/README_CN.md
new file mode 100644
index 00000000..711c74bc
--- /dev/null
+++ b/examples/interface/README_CN.md
@@ -0,0 +1,22 @@
+# Apache Dubbo-rust 示例 - interface
+
+## 构建并运行
+
+```sh
+$ cd github.com/apache/dubbo-rust/examples/interface/
+$ cargo build
+
+$ # run server
+$ ../../target/debug/interface-server
+
+$ # run client
+$ ../../target/debug/interface-client
+
+# client stream
+server response : Ok("Hello world1")
+server response : Ok(ResDto { str: "Hello world2:world3 V2" })
+
+# server stream
+client request : "world1"
+client request : ReqDto { str: "world2" } : ReqDto { str: "world3" }
+```
diff --git a/examples/interface/application.yaml b/examples/interface/application.yaml
new file mode 100644
index 00000000..f6974286
--- /dev/null
+++ b/examples/interface/application.yaml
@@ -0,0 +1,25 @@
+logging:
+  level: debug
+dubbo:
+  protocols:
+    triple:
+      ip: 0.0.0.0
+      port: '8888'
+      name: tri
+  registries:
+    demoZK:
+      protocol: zookeeper
+      address: 0.0.0.0:2181
+  provider:
+    services:
+      DemoServiceImpl:
+        version: 1.0.0
+        group: test
+        protocol: triple
+        serialization : fastjson
+        interface: org.apache.dubbo.springboot.demo.DemoService
+  routers:
+    consumer:
+      - service: "org.apache.dubbo.springboot.demo.DemoService"
+        url: tri://127.0.0.1:20000
+        protocol: triple
\ No newline at end of file
diff --git a/dubbo/src/cluster/loadbalance/mod.rs b/examples/interface/src/client.rs
similarity index 50%
rename from dubbo/src/cluster/loadbalance/mod.rs
rename to examples/interface/src/client.rs
index 1d04f7fc..cf192ae3 100644
--- a/dubbo/src/cluster/loadbalance/mod.rs
+++ b/examples/interface/src/client.rs
@@ -14,29 +14,29 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
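The `application.yaml` above is what the interface example loads at startup. A minimal sketch of that loading step, mirroring the interface server shown further below (the `DUBBO_CONFIG_PATH` override comes from that server; the panic message wording is illustrative):

```rust
// Sketch only: point the config loader at the example's application.yaml and
// load it into a RootConfig, as examples/interface/src/server.rs does.
use dubbo_config::RootConfig;
use std::env;

fn load_interface_config() -> RootConfig {
    env::set_var("DUBBO_CONFIG_PATH", "examples/interface/application.yaml");
    match RootConfig::new().load() {
        Ok(config) => config,
        Err(err) => panic!("failed to load application.yaml: {:?}", err),
    }
}
```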
*/ -use std::collections::HashMap; -use lazy_static::lazy_static; +use dubbo::codegen::ClientBuilder; +use dubbo::registry::n_registry::ArcRegistry; +use example_interface::{DemoServiceRpc, ReqDto}; +use registry_zookeeper::ZookeeperRegistry; -use crate::cluster::loadbalance::{ - impls::{random::RandomLoadBalance, roundrobin::RoundRobinLoadBalance}, - types::BoxLoadBalance, -}; - -pub mod impls; -pub mod types; - -lazy_static! { - pub static ref LOAD_BALANCE_EXTENSIONS: HashMap = - init_loadbalance_extensions(); -} - -fn init_loadbalance_extensions() -> HashMap { - let mut loadbalance_map: HashMap = HashMap::new(); - loadbalance_map.insert("random".to_string(), Box::new(RandomLoadBalance::default())); - loadbalance_map.insert( - "roundrobin".to_string(), - Box::new(RoundRobinLoadBalance::default()), - ); - loadbalance_map +#[tokio::main] +async fn main() { + // dubbo_logger::init(); + let builder = ClientBuilder::new() + .with_registry(ArcRegistry::new(ZookeeperRegistry::new("127.0.0.1:2181"))); + let mut client = DemoServiceRpc::new(builder); + let res = client.sayHello("world1".to_string()).await; + println!("server response : {:?}", res); + let res = client + .sayHelloV2( + ReqDto { + str: "world2".to_string(), + }, + ReqDto { + str: "world3".to_string(), + }, + ) + .await; + println!("server response : {:?}", res); } diff --git a/dubbo/src/cluster/loadbalance/impls/mod.rs b/examples/interface/src/lib.rs similarity index 63% rename from dubbo/src/cluster/loadbalance/impls/mod.rs rename to examples/interface/src/lib.rs index 5a84af8c..bb2f74c9 100644 --- a/dubbo/src/cluster/loadbalance/impls/mod.rs +++ b/examples/interface/src/lib.rs @@ -14,5 +14,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -pub mod random; -pub mod roundrobin; + +use dubbo_macro::rpc_trait; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Default, Debug)] +pub struct ReqDto { + pub str: String, +} + +#[derive(Serialize, Deserialize, Default, Debug)] +pub struct ResDto { + pub str: String, +} + +#[rpc_trait(package = "org.apache.dubbo.springboot.demo")] +pub trait DemoService { + async fn sayHello(&self, name: String) -> String; + + async fn sayHelloV2(&self, name: ReqDto, name2: ReqDto) -> ResDto; +} diff --git a/examples/interface/src/server.rs b/examples/interface/src/server.rs new file mode 100644 index 00000000..9b8bbd37 --- /dev/null +++ b/examples/interface/src/server.rs @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
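The DTOs in `lib.rs` above derive `Serialize`/`Deserialize`, and the `fastjson` serialize type carries JSON-encoded argument and return payloads (the request wrapper stores JSON bytes), so `ReqDto`/`ResDto` round-trip through `serde_json`. A small sketch, with the crate name `example_interface` taken from the Cargo.toml above:

```rust
// Sketch only: the JSON shape of the DTOs used by the DemoService trait above.
use example_interface::{ReqDto, ResDto};

fn dto_roundtrip() -> serde_json::Result<()> {
    let req = ReqDto { str: "world2".to_string() };
    let json = serde_json::to_string(&req)?; // {"str":"world2"}
    let back: ReqDto = serde_json::from_str(&json)?;
    assert_eq!(back.str, "world2");

    // A response body like the one printed by the client example.
    let res: ResDto = serde_json::from_str(r#"{"str":"Hello world2:world3 V2"}"#)?;
    println!("decoded: {:?}", res);
    Ok(())
}
```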
+ */ + +use dubbo::registry::n_registry::ArcRegistry; +use dubbo::Dubbo; +use dubbo_config::RootConfig; +use dubbo_macro::rpc_server; +use example_interface::{DemoService, ReqDto, ResDto}; +use registry_zookeeper::ZookeeperRegistry; +use std::env; + +#[derive(Clone)] +struct DemoServiceImpl { + _db: String, +} + +#[rpc_server(package = "org.apache.dubbo.springboot.demo")] +impl DemoService for DemoServiceImpl { + async fn sayHello(&self, req: String) -> Result { + println!("client request : {:?}", req); + return Ok("Hello ".to_owned() + &req); + } + async fn sayHelloV2(&self, req: ReqDto, req2: ReqDto) -> Result { + println!("client request : {:?} : {:?}", req, req2); + return Ok(ResDto { + str: "Hello ".to_owned() + &req.str + ":" + &req2.str + " V2", + }); + } +} + +#[tokio::main] +async fn main() { + // dubbo_logger::init(); + env::set_var("DUBBO_CONFIG_PATH", "examples/interface/application.yaml"); + let r = RootConfig::new(); + let r = match r.load() { + Ok(config) => config, + Err(_err) => panic!("err: {:?}", _err), // response was droped + }; + let server = DemoServiceImpl { + _db: "a db".to_string(), + }; + let zookeeper_registry = ZookeeperRegistry::new("127.0.0.1:2181"); + let mut f = Dubbo::new() + .with_config(r) + .add_registry("zookeeper-registry", ArcRegistry::new(zookeeper_registry)) + .register_server(server); + f.start().await; +} diff --git a/registry/nacos/Cargo.toml b/registry/nacos/Cargo.toml index 2437bc0b..1fa6c196 100644 --- a/registry/nacos/Cargo.toml +++ b/registry/nacos/Cargo.toml @@ -9,13 +9,15 @@ repository = "https://github.com/apache/dubbo-rust.git" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -nacos-sdk = { version = "0.2.3", features = ["naming", "auth-by-http"] } +nacos-sdk = { version = "0.3", features = ["naming", "auth-by-http", "async"] } dubbo.workspace = true serde_json.workspace = true serde = { workspace = true, features = ["derive"] } anyhow.workspace = true dubbo-logger.workspace = true dubbo-base.workspace = true +tokio.workspace = true +async-trait.workspace = true [dev-dependencies] tracing-subscriber = "0.3.16" diff --git a/registry/nacos/src/lib.rs b/registry/nacos/src/lib.rs index bbbaaa2f..ad34237e 100644 --- a/registry/nacos/src/lib.rs +++ b/registry/nacos/src/lib.rs @@ -16,16 +16,20 @@ */ mod utils; +use async_trait::async_trait; use dubbo_base::Url; -use std::{ - collections::{HashMap, HashSet}, - sync::{Arc, Mutex}, -}; +use std::{collections::HashMap, sync::Arc}; +use tokio::{select, sync::mpsc}; use anyhow::anyhow; -use dubbo::registry::{NotifyListener, Registry, RegistryNotifyListener, ServiceEvent}; -use dubbo_logger::tracing::{error, info, warn}; -use nacos_sdk::api::naming::{NamingService, NamingServiceBuilder, ServiceInstance}; +use dubbo::{ + registry::n_registry::{DiscoverStream, Registry, ServiceChange}, + StdError, +}; +use dubbo_logger::tracing::{debug, error, info}; +use nacos_sdk::api::naming::{ + NamingEventListener, NamingService, NamingServiceBuilder, ServiceInstance, +}; use crate::utils::{build_nacos_client_props, is_concrete_str, is_wildcard_str, match_range}; @@ -60,7 +64,6 @@ const INNERCLASS_COMPATIBLE_SYMBOL: &str = "___"; pub struct NacosRegistry { nacos_naming_service: Arc, - listeners: Mutex>>>, } impl NacosRegistry { @@ -77,49 +80,6 @@ impl NacosRegistry { Self { nacos_naming_service: Arc::new(nacos_naming_service), - listeners: Mutex::new(HashMap::new()), - } - } - - #[allow(dead_code)] - fn get_subscribe_service_names(&self, service_name: 
&NacosServiceName) -> HashSet { - if service_name.is_concrete() { - let mut set = HashSet::new(); - let service_subscribe_name = service_name.to_subscriber_str(); - let service_subscriber_legacy_name = service_name.to_subscriber_legacy_string(); - if service_subscribe_name.eq(&service_subscriber_legacy_name) { - set.insert(service_subscribe_name); - } else { - set.insert(service_subscribe_name); - set.insert(service_subscriber_legacy_name); - } - - set - } else { - let list_view = self.nacos_naming_service.get_service_list( - 1, - i32::MAX, - Some( - service_name - .get_group_with_default(DEFAULT_GROUP) - .to_string(), - ), - ); - if let Err(e) = list_view { - error!("list service instances occur an error: {:?}", e); - return HashSet::default(); - } - - let list_view = list_view.unwrap(); - let set: HashSet = list_view - .0 - .into_iter() - .filter(|service_name| service_name.split(SERVICE_NAME_SEPARATOR).count() == 4) - .map(|service_name| NacosServiceName::from_service_name_str(&service_name)) - .filter(|other_service_name| service_name.is_compatible(other_service_name)) - .map(|service_name| service_name.to_subscriber_str()) - .collect(); - set } } } @@ -135,20 +95,66 @@ impl NacosRegistry { ..Default::default() } } + + fn diff<'a>( + old_service: &'a Vec, + new_services: &'a Vec, + ) -> (Vec<&'a ServiceInstance>, Vec<&'a ServiceInstance>) { + let new_hosts_map: HashMap = new_services + .iter() + .map(|hosts| (hosts.ip_and_port(), hosts)) + .collect(); + + let old_hosts_map: HashMap = old_service + .iter() + .map(|hosts| (hosts.ip_and_port(), hosts)) + .collect(); + + let mut add_hosts = Vec::<&ServiceInstance>::new(); + let mut removed_hosts = Vec::<&ServiceInstance>::new(); + + for (key, new_host) in new_hosts_map.iter() { + let old_host = old_hosts_map.get(key); + match old_host { + None => { + add_hosts.push(*new_host); + } + Some(old_host) => { + if !old_host.is_same_instance(new_host) { + removed_hosts.push(*old_host); + add_hosts.push(*new_host); + } + } + } + } + + for (key, old_host) in old_hosts_map.iter() { + let new_host = new_hosts_map.get(key); + match new_host { + None => { + removed_hosts.push(*old_host); + } + Some(_) => {} + } + } + + (removed_hosts, add_hosts) + } } +#[async_trait] impl Registry for NacosRegistry { - fn register(&mut self, url: Url) -> Result<(), dubbo::StdError> { - let side = url.get_param(SIDE_KEY).unwrap_or_default(); - let register_consumer = url - .get_param(REGISTER_CONSUMER_URL_KEY) - .unwrap_or_else(|| false.to_string()) - .parse::() - .unwrap_or(false); - if side.ne(PROVIDER_SIDE) && !register_consumer { - warn!("Please set 'dubbo.registry.parameters.register-consumer-url=true' to turn on consumer url registration."); - return Ok(()); - } + async fn register(&self, url: Url) -> Result<(), dubbo::StdError> { + // let side = url.get_param(SIDE_KEY).unwrap_or_default(); + // let register_consumer = url + // .get_param(REGISTER_CONSUMER_URL_KEY) + // .unwrap_or_else(|| false.to_string()) + // .parse::() + // .unwrap_or(false); + // if side.ne(PROVIDER_SIDE) && !register_consumer { + // warn!("Please set 'dubbo.registry.parameters.register-consumer-url=true' to turn on consumer url registration."); + // return Ok(()); + // } let nacos_service_name = NacosServiceName::new(&url); @@ -162,11 +168,10 @@ impl Registry for NacosRegistry { let nacos_service_instance = Self::create_nacos_service_instance(url); info!("register service: {}", nacos_service_name); - let ret = self.nacos_naming_service.register_instance( - nacos_service_name, - group_name, - 
nacos_service_instance, - ); + let ret = self + .nacos_naming_service + .register_instance(nacos_service_name, group_name, nacos_service_instance) + .await; if let Err(e) = ret { error!("register to nacos occur an error: {:?}", e); return Err(anyhow!("register to nacos occur an error: {:?}", e).into()); @@ -175,7 +180,7 @@ impl Registry for NacosRegistry { Ok(()) } - fn unregister(&mut self, url: Url) -> Result<(), dubbo::StdError> { + async fn unregister(&self, url: Url) -> Result<(), dubbo::StdError> { let nacos_service_name = NacosServiceName::new(&url); let group_name = Some( @@ -189,11 +194,10 @@ impl Registry for NacosRegistry { info!("deregister service: {}", nacos_service_name); - let ret = self.nacos_naming_service.deregister_instance( - nacos_service_name, - group_name, - nacos_service_instance, - ); + let ret = self + .nacos_naming_service + .deregister_instance(nacos_service_name, group_name, nacos_service_instance) + .await; if let Err(e) = ret { error!("deregister service from nacos occur an error: {:?}", e); return Err(anyhow!("deregister service from nacos occur an error: {:?}", e).into()); @@ -201,101 +205,161 @@ impl Registry for NacosRegistry { Ok(()) } - fn subscribe(&self, url: Url, listener: RegistryNotifyListener) -> Result<(), dubbo::StdError> { + async fn subscribe(&self, url: Url) -> Result { let service_name = NacosServiceName::new(&url); - let url_str = url.to_url(); - - info!("subscribe: {}", &url_str); + let service_group = service_name + .get_group_with_default(DEFAULT_GROUP) + .to_string(); + let subscriber_url = service_name.to_subscriber_str(); + info!("subscribe: {}", subscriber_url); + + let (listener, mut change_receiver) = ServiceChangeListener::new(); + let arc_listener = Arc::new(listener); + + let (discover_tx, discover_rx) = mpsc::channel(64); + + let nacos_naming_service = self.nacos_naming_service.clone(); + + let listener_in_task = arc_listener.clone(); + let service_group_in_task = service_group.clone(); + let subscriber_url_in_task = subscriber_url.clone(); + tokio::spawn(async move { + let listener = listener_in_task; + let service_group = service_group_in_task; + let subscriber_url = subscriber_url_in_task; + + let mut current_instances = Vec::new(); + loop { + let change = select! 
{ + _ = discover_tx.closed() => { + debug!("service {} change task quit, unsubscribe.", subscriber_url); + None + }, + change = change_receiver.recv() => change + }; + + match change { + Some(instances) => { + debug!("service {} changed", subscriber_url); + let (remove_instances, add_instances) = + NacosRegistry::diff(¤t_instances, &instances); + + for instance in remove_instances { + let service_name = instance.service_name.as_ref(); + let url = match service_name { + None => { + format!("triple://{}:{}", instance.ip(), instance.port()) + } + Some(service_name) => { + format!( + "triple://{}:{}/{}", + instance.ip(), + instance.port(), + service_name + ) + } + }; + + match discover_tx.send(Ok(ServiceChange::Remove(url))).await { + Ok(_) => {} + Err(e) => { + error!( + "send service change failed: {:?}, maybe user unsubscribe", + e + ); + break; + } + } + } + + for instance in add_instances { + let service_name = instance.service_name.as_ref(); + let url = match service_name { + None => { + format!("triple://{}:{}", instance.ip(), instance.port()) + } + Some(service_name) => { + format!( + "triple://{}:{}/{}", + instance.ip(), + instance.port(), + service_name + ) + } + }; + + match discover_tx.send(Ok(ServiceChange::Insert(url, ()))).await { + Ok(_) => {} + Err(e) => { + error!( + "send service change failed: {:?}, maybe user unsubscribe", + e + ); + break; + } + } + } + current_instances = instances; + } + None => { + error!( + "receive service change task quit, unsubscribe {}.", + subscriber_url + ); + break; + } + } + } - let nacos_listener: Arc = { - let listeners = self.listeners.lock(); - if let Err(e) = listeners { + debug!("unsubscribe service: {}", subscriber_url); + // unsubscribe + let unsubscribe = nacos_naming_service + .unsubscribe(subscriber_url, Some(service_group), Vec::new(), listener) + .await; + + match unsubscribe { + Ok(_) => {} + Err(e) => { + error!("unsubscribe service failed: {:?}", e); + } + } + }); + + let all_instance = self + .nacos_naming_service + .get_all_instances( + subscriber_url.clone(), + Some(service_group.clone()), + Vec::new(), + false, + ) + .await?; + let _ = arc_listener.changed(all_instance); + + match self + .nacos_naming_service + .subscribe( + subscriber_url.clone(), + Some(service_group.clone()), + Vec::new(), + arc_listener, + ) + .await + { + Ok(_) => {} + Err(e) => { error!("subscribe service failed: {:?}", e); return Err(anyhow!("subscribe service failed: {:?}", e).into()); } - - let mut listeners = listeners.unwrap(); - let listener_set = listeners.get_mut(url_str.as_str()); - - let wrapper = Arc::new(NotifyListenerWrapper(listener)); - if let Some(listener_set) = listener_set { - listener_set.insert(wrapper.clone()); - } else { - let mut hash_set = HashSet::new(); - hash_set.insert(wrapper.clone()); - listeners.insert(url_str, hash_set); - } - - wrapper - }; - - let ret = self.nacos_naming_service.subscribe( - service_name.to_subscriber_str(), - Some( - service_name - .get_group_with_default(DEFAULT_GROUP) - .to_string(), - ), - Vec::new(), - nacos_listener, - ); - - if let Err(e) = ret { - error!("subscribe service failed: {:?}", e); - return Err(anyhow!("subscribe service failed: {:?}", e).into()); } - Ok(()) + Ok(discover_rx) } - fn unsubscribe( - &self, - url: Url, - listener: RegistryNotifyListener, - ) -> Result<(), dubbo::StdError> { + async fn unsubscribe(&self, url: Url) -> Result<(), dubbo::StdError> { let service_name = NacosServiceName::new(&url); - let url_str = url.to_url(); - info!("unsubscribe: {}", &url_str); - - let 
nacos_listener: Arc = { - let listeners = self.listeners.lock(); - if let Err(e) = listeners { - error!("unsubscribe service failed: {:?}", e); - return Err(anyhow!("unsubscribe service failed: {:?}", e).into()); - } - - let mut listeners = listeners.unwrap(); - let listener_set = listeners.get_mut(url_str.as_str()); - if listener_set.is_none() { - return Ok(()); - } - - let listener_set = listener_set.unwrap(); - - let listener = Arc::new(NotifyListenerWrapper(listener)); - let listener = listener_set.take(&listener); - if listener.is_none() { - return Ok(()); - } - - listener.unwrap() - }; - - let ret = self.nacos_naming_service.unsubscribe( - service_name.to_subscriber_str(), - Some( - service_name - .get_group_with_default(DEFAULT_GROUP) - .to_string(), - ), - Vec::new(), - nacos_listener, - ); - - if let Err(e) = ret { - error!("unsubscribe service failed: {:?}", e); - return Err(anyhow!("unsubscribe service failed: {:?}", e).into()); - } + let subscriber_url = service_name.to_subscriber_str(); + info!("unsubscribe: {}", &subscriber_url); Ok(()) } @@ -484,52 +548,43 @@ impl NacosServiceName { } } -struct NotifyListenerWrapper(Arc); - -impl std::hash::Hash for NotifyListenerWrapper { - fn hash(&self, state: &mut H) { - let ptr = self.0.as_ref(); - std::ptr::hash(ptr, state); - } +struct ServiceChangeListener { + tx: mpsc::Sender>, } -impl PartialEq for NotifyListenerWrapper { - fn eq(&self, other: &Self) -> bool { - let self_ptr = self.0.as_ref() as *const dyn NotifyListener; - let other_ptr = other.0.as_ref() as *const dyn NotifyListener; +impl ServiceChangeListener { + pub fn new() -> (Self, mpsc::Receiver>) { + let (tx, rx) = mpsc::channel(64); + let this = Self { tx }; - let (self_data_ptr, _): (*const u8, *const u8) = unsafe { std::mem::transmute(self_ptr) }; + (this, rx) + } - let (other_data_ptr, _): (*const u8, *const u8) = unsafe { std::mem::transmute(other_ptr) }; - self_data_ptr == other_data_ptr + pub fn changed(&self, instances: Vec) -> Result<(), dubbo::StdError> { + match self.tx.try_send(instances) { + Ok(_) => Ok(()), + Err(e) => { + error!("send service change failed: {:?}", e); + Err(anyhow!("send service change failed: {:?}", e).into()) + } + } } } -impl Eq for NotifyListenerWrapper {} - -impl nacos_sdk::api::naming::NamingEventListener for NotifyListenerWrapper { +impl NamingEventListener for ServiceChangeListener { fn event(&self, event: Arc) { - let service_name = event.service_name.clone(); + debug!("service change {}", event.service_name.clone()); + debug!("nacos event: {:?}", event); + let instances = event.instances.as_ref(); - let urls: Vec; - if let Some(instances) = instances { - urls = instances - .iter() - .filter_map(|data| { - let url_str = - format!("triple://{}:{}/{}", data.ip(), data.port(), service_name); - Url::from_url(&url_str) - }) - .collect(); - } else { - urls = Vec::new(); + match instances { + None => { + let _ = self.changed(Vec::default()); + } + Some(instances) => { + let _ = self.changed(instances.clone()); + } } - let notify_event = ServiceEvent { - key: service_name, - action: String::from("CHANGE"), - service: urls, - }; - self.0.notify(notify_event); } } @@ -543,9 +598,9 @@ pub mod tests { use super::*; - #[test] + #[tokio::test] #[ignore] - pub fn test_register_to_nacos() { + pub async fn test_register_to_nacos() { tracing_subscriber::fmt() .with_thread_names(true) .with_file(true) @@ -556,14 +611,14 @@ pub mod tests { .init(); let nacos_registry_url = 
Url::from_url("nacos://127.0.0.1:8848/org.apache.dubbo.registry.RegistryService?application=dubbo-demo-triple-api-provider&dubbo=2.0.2&interface=org.apache.dubbo.registry.RegistryService&pid=7015").unwrap(); - let mut registry = NacosRegistry::new(nacos_registry_url); + let registry = NacosRegistry::new(nacos_registry_url); let mut service_url = Url::from_url("tri://127.0.0.1:50052/org.apache.dubbo.demo.GreeterService?anyhost=true&application=dubbo-demo-triple-api-provider&background=false&deprecated=false&dubbo=2.0.2&dynamic=true&generic=false&interface=org.apache.dubbo.demo.GreeterService&methods=sayHello,sayHelloAsync&pid=7015&service-name-mapping=true&side=provider×tamp=1670060843807").unwrap(); service_url .params .insert(SIDE_KEY.to_owned(), PROVIDER_SIDE.to_owned()); - let ret = registry.register(service_url); + let ret = registry.register(service_url).await; info!("register result: {:?}", ret); @@ -571,9 +626,9 @@ pub mod tests { thread::sleep(sleep_millis); } - #[test] + #[tokio::test] #[ignore] - pub fn test_register_and_unregister() { + pub async fn test_register_and_unregister() { tracing_subscriber::fmt() .with_thread_names(true) .with_file(true) @@ -584,14 +639,14 @@ pub mod tests { .init(); let nacos_registry_url = Url::from_url("nacos://127.0.0.1:8848/org.apache.dubbo.registry.RegistryService?application=dubbo-demo-triple-api-provider&dubbo=2.0.2&interface=org.apache.dubbo.registry.RegistryService&pid=7015").unwrap(); - let mut registry = NacosRegistry::new(nacos_registry_url); + let registry = NacosRegistry::new(nacos_registry_url); let mut service_url = Url::from_url("tri://127.0.0.1:9090/org.apache.dubbo.demo.GreeterService?anyhost=true&application=dubbo-demo-triple-api-provider&background=false&deprecated=false&dubbo=2.0.2&dynamic=true&generic=false&interface=org.apache.dubbo.demo.GreeterService&methods=sayHello,sayHelloAsync&pid=7015&service-name-mapping=true&side=provider×tamp=1670060843807").unwrap(); service_url .params .insert(SIDE_KEY.to_owned(), PROVIDER_SIDE.to_owned()); - let ret = registry.register(service_url); + let ret = registry.register(service_url).await; info!("register result: {:?}", ret); @@ -599,7 +654,7 @@ pub mod tests { thread::sleep(sleep_millis); let unregister_url = Url::from_url("tri://127.0.0.1:9090/org.apache.dubbo.demo.GreeterService?anyhost=true&application=dubbo-demo-triple-api-provider&background=false&deprecated=false&dubbo=2.0.2&dynamic=true&generic=false&interface=org.apache.dubbo.demo.GreeterService&methods=sayHello,sayHelloAsync&pid=7015&service-name-mapping=true&side=provider×tamp=1670060843807").unwrap(); - let ret = registry.unregister(unregister_url); + let ret = registry.unregister(unregister_url).await; info!("deregister result: {:?}", ret); @@ -607,20 +662,9 @@ pub mod tests { thread::sleep(sleep_millis); } - struct TestNotifyListener; - impl NotifyListener for TestNotifyListener { - fn notify(&self, event: ServiceEvent) { - info!("notified: {:?}", event.key); - } - - fn notify_all(&self, event: ServiceEvent) { - info!("notify_all: {:?}", event.key); - } - } - - #[test] + #[tokio::test] #[ignore] - fn test_subscribe() { + pub async fn test_subscribe() { tracing_subscriber::fmt() .with_thread_names(true) .with_file(true) @@ -631,33 +675,36 @@ pub mod tests { .init(); let nacos_registry_url = Url::from_url("nacos://127.0.0.1:8848/org.apache.dubbo.registry.RegistryService?application=dubbo-demo-triple-api-provider&dubbo=2.0.2&interface=org.apache.dubbo.registry.RegistryService&pid=7015").unwrap(); - let mut registry = 
NacosRegistry::new(nacos_registry_url); + let registry = NacosRegistry::new(nacos_registry_url); let mut service_url = Url::from_url("tri://127.0.0.1:50052/org.apache.dubbo.demo.GreeterService?anyhost=true&application=dubbo-demo-triple-api-provider&background=false&deprecated=false&dubbo=2.0.2&dynamic=true&generic=false&interface=org.apache.dubbo.demo.GreeterService&methods=sayHello,sayHelloAsync&pid=7015&service-name-mapping=true&side=provider×tamp=1670060843807").unwrap(); service_url .params .insert(SIDE_KEY.to_owned(), PROVIDER_SIDE.to_owned()); - let ret = registry.register(service_url); + let ret = registry.register(service_url).await; info!("register result: {:?}", ret); - let subscribe_url = Url::from_url("provider://192.168.0.102:50052/org.apache.dubbo.demo.GreeterService?anyhost=true&application=dubbo-demo-triple-api-provider&background=false&bind.ip=192.168.0.102&bind.port=50052&category=configurators&check=false&deprecated=false&dubbo=2.0.2&dynamic=true&generic=false&interface=org.apache.dubbo.demo.GreeterService&ipv6=fd00:6cb1:58a2:8ddf:0:0:0:1000&methods=sayHello,sayHelloAsync&pid=44270&service-name-mapping=true&side=provider").unwrap(); - - let ret = registry.subscribe(subscribe_url, Arc::new(TestNotifyListener)); + let subscribe_url = Url::from_url("consumer://192.168.0.102:50052/org.apache.dubbo.demo.GreeterService?anyhost=true&application=dubbo-demo-triple-api-provider&background=false&bind.ip=192.168.0.102&bind.port=50052&category=configurators&check=false&deprecated=false&dubbo=2.0.2&dynamic=true&generic=false&interface=org.apache.dubbo.demo.GreeterService&ipv6=fd00:6cb1:58a2:8ddf:0:0:0:1000&methods=sayHello,sayHelloAsync&pid=44270&service-name-mapping=true&side=provider").unwrap(); + let subscribe_ret = registry.subscribe(subscribe_url).await; - if let Err(e) = ret { + if let Err(e) = subscribe_ret { error!("error message: {:?}", e); return; } + let mut rx = subscribe_ret.unwrap(); + let change = rx.recv().await; + info!("receive change: {:?}", change); + let sleep_millis = time::Duration::from_secs(300); thread::sleep(sleep_millis); } - #[test] + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore] - fn test_unsubscribe() { + pub async fn test_unsubscribe() { tracing_subscriber::fmt() .with_thread_names(true) .with_file(true) @@ -668,33 +715,35 @@ pub mod tests { .init(); let nacos_registry_url = Url::from_url("nacos://127.0.0.1:8848/org.apache.dubbo.registry.RegistryService?application=dubbo-demo-triple-api-provider&dubbo=2.0.2&interface=org.apache.dubbo.registry.RegistryService&pid=7015").unwrap(); - let mut registry = NacosRegistry::new(nacos_registry_url); + let registry = NacosRegistry::new(nacos_registry_url); let mut service_url = Url::from_url("tri://127.0.0.1:50052/org.apache.dubbo.demo.GreeterService?anyhost=true&application=dubbo-demo-triple-api-provider&background=false&deprecated=false&dubbo=2.0.2&dynamic=true&generic=false&interface=org.apache.dubbo.demo.GreeterService&methods=sayHello,sayHelloAsync&pid=7015&service-name-mapping=true&side=provider×tamp=1670060843807").unwrap(); service_url .params .insert(SIDE_KEY.to_owned(), PROVIDER_SIDE.to_owned()); - let ret = registry.register(service_url); + let ret = registry.register(service_url).await; info!("register result: {:?}", ret); let subscribe_url = 
Url::from_url("provider://192.168.0.102:50052/org.apache.dubbo.demo.GreeterService?anyhost=true&application=dubbo-demo-triple-api-provider&background=false&bind.ip=192.168.0.102&bind.port=50052&category=configurators&check=false&deprecated=false&dubbo=2.0.2&dynamic=true&generic=false&interface=org.apache.dubbo.demo.GreeterService&ipv6=fd00:6cb1:58a2:8ddf:0:0:0:1000&methods=sayHello,sayHelloAsync&pid=44270&service-name-mapping=true&side=provider").unwrap(); - let listener = Arc::new(TestNotifyListener); - - let ret = registry.subscribe(subscribe_url, listener.clone()); + let ret = registry.subscribe(subscribe_url).await; if let Err(e) = ret { error!("error message: {:?}", e); return; } + let mut rx = ret.unwrap(); + let change = rx.recv().await; + info!("receive change: {:?}", change); + let sleep_millis = time::Duration::from_secs(40); thread::sleep(sleep_millis); let unsubscribe_url = Url::from_url("provider://192.168.0.102:50052/org.apache.dubbo.demo.GreeterService?anyhost=true&application=dubbo-demo-triple-api-provider&background=false&bind.ip=192.168.0.102&bind.port=50052&category=configurators&check=false&deprecated=false&dubbo=2.0.2&dynamic=true&generic=false&interface=org.apache.dubbo.demo.GreeterService&ipv6=fd00:6cb1:58a2:8ddf:0:0:0:1000&methods=sayHello,sayHelloAsync&pid=44270&service-name-mapping=true&side=provider").unwrap(); - let ret = registry.unsubscribe(unsubscribe_url, listener.clone()); + let ret = registry.unsubscribe(unsubscribe_url).await; if let Err(e) = ret { error!("error message: {:?}", e); diff --git a/registry/zookeeper/Cargo.toml b/registry/zookeeper/Cargo.toml index 2df54995..ebcb2694 100644 --- a/registry/zookeeper/Cargo.toml +++ b/registry/zookeeper/Cargo.toml @@ -9,10 +9,13 @@ repository = "https://github.com/apache/dubbo-rust.git" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -zookeeper = "0.7.0" +zookeeper = "0.8.0" dubbo.workspace = true +anyhow.workspace = true serde_json.workspace = true serde = { workspace = true, features = ["derive"] } urlencoding.workspace = true dubbo-logger.workspace = true dubbo-base.workspace = true +tokio.workspace = true +async-trait.workspace = true diff --git a/registry/zookeeper/src/lib.rs b/registry/zookeeper/src/lib.rs index 6a6e94ba..f3733d50 100644 --- a/registry/zookeeper/src/lib.rs +++ b/registry/zookeeper/src/lib.rs @@ -17,31 +17,24 @@ #![allow(unused_variables, dead_code, missing_docs)] -use std::{ - collections::{HashMap, HashSet}, - env, - sync::{Arc, Mutex, RwLock}, - time::Duration, -}; +use std::{collections::HashMap, env, sync::Arc, time::Duration}; +use async_trait::async_trait; use dubbo_base::{ constants::{DUBBO_KEY, LOCALHOST_IP, PROVIDERS_KEY}, Url, }; use dubbo_logger::tracing::{debug, error, info}; use serde::{Deserialize, Serialize}; -#[allow(unused_imports)] -use zookeeper::{Acl, CreateMode, WatchedEvent, WatchedEventType, Watcher, ZkError, ZooKeeper}; +use tokio::{select, sync::mpsc}; +use zookeeper::{Acl, CreateMode, WatchedEvent, WatchedEventType, Watcher, ZooKeeper}; use dubbo::{ - registry::{ - memory_registry::MemoryRegistry, NotifyListener, Registry, RegistryNotifyListener, - ServiceEvent, - }, + registry::n_registry::{DiscoverStream, Registry, ServiceChange}, StdError, }; -// Get metadata of a service registration from a URL +// 从url中获取服务注册的元数据 // rawURL = fmt.Sprintf("%s://%s%s?%s", c.Protocol, host, c.Path, s) // dubboPath = fmt.Sprintf("/%s/%s/%s", r.URL.GetParam(constant.RegistryGroupKey, "dubbo"), r.service(c), 
common.DubboNodes[common.PROVIDER]) @@ -54,12 +47,9 @@ impl Watcher for LoggingWatcher { } } -//#[derive(Debug)] pub struct ZookeeperRegistry { root_path: String, zk_client: Arc, - listeners: RwLock>, - memory_registry: Arc>, } #[derive(Serialize, Deserialize, Debug)] @@ -91,24 +81,6 @@ impl ZookeeperRegistry { ZookeeperRegistry { root_path: "/services".to_string(), zk_client: Arc::new(zk_client), - listeners: RwLock::new(HashMap::new()), - memory_registry: Arc::new(Mutex::new(MemoryRegistry::default())), - } - } - - fn create_listener( - &self, - path: String, - service_name: String, - listener: RegistryNotifyListener, - ) -> ServiceInstancesChangedListener { - let mut service_names = HashSet::new(); - service_names.insert(service_name.clone()); - ServiceInstancesChangedListener { - zk_client: Arc::clone(&self.zk_client), - path, - service_name: service_name.clone(), - listener, } } @@ -127,10 +99,6 @@ impl ZookeeperRegistry { s.to_string() } - pub fn get_client(&self) -> Arc { - self.zk_client.clone() - } - // If the parent node does not exist in the ZooKeeper, Err(ZkError::NoNode) will be returned. pub fn create_path( &self, @@ -153,8 +121,8 @@ impl ZookeeperRegistry { match zk_result { Ok(_) => Ok(()), Err(err) => { - error!("create path {} to zookeeper error {}", path, err); - Err(Box::try_from(err).unwrap()) + error!("zk path {} parent not exists.", path); + Err(err.into()) } } } @@ -176,28 +144,12 @@ impl ZookeeperRegistry { current.push('/'); current.push_str(node_key); if !self.exists_path(current.as_str()) { - let new_create_mode = match children == node_key { - true => create_mode, - false => CreateMode::Persistent, - }; - let new_data = match children == node_key { - true => data, - false => "", + let (new_create_mode, new_data) = match children == node_key { + true => (create_mode, data), + false => (CreateMode::Persistent, ""), }; - //Skip ZkError::NodeExists - let res = self.create_path(current.as_str(), new_data, new_create_mode); - let mut node_exist = false; - if let Err(e) = &res { - if let Some(zk_err) = e.downcast_ref::() { - if ZkError::NodeExists == *zk_err { - node_exist = true; - } - } - } - if !node_exist { - return res; - } + self.create_path(current.as_str(), new_data, new_create_mode)?; } } Ok(()) @@ -205,7 +157,7 @@ impl ZookeeperRegistry { pub fn delete_path(&self, path: &str) { if self.exists_path(path) { - self.get_client().delete(path, None).unwrap() + self.zk_client.delete(path, None).unwrap() } } @@ -225,6 +177,65 @@ impl ZookeeperRegistry { None } } + + pub fn diff<'a>( + old_urls: &'a Vec, + new_urls: &'a Vec, + ) -> (Vec, Vec) { + let old_urls_map: HashMap = old_urls + .iter() + .map(|url| dubbo_base::Url::from_url(url.as_str())) + .filter(|item| item.is_some()) + .map(|item| item.unwrap()) + .map(|item| { + let ip_port = item.get_ip_port(); + let url = item.encoded_raw_url_string(); + (ip_port, url) + }) + .collect(); + + let new_urls_map: HashMap = new_urls + .iter() + .map(|url| dubbo_base::Url::from_url(url.as_str())) + .filter(|item| item.is_some()) + .map(|item| item.unwrap()) + .map(|item| { + let ip_port = item.get_ip_port(); + let url = item.encoded_raw_url_string(); + (ip_port, url) + }) + .collect(); + + let mut add_hosts = Vec::new(); + let mut removed_hosts = Vec::new(); + + for (key, new_host) in new_urls_map.iter() { + let old_host = old_urls_map.get(key); + match old_host { + None => { + add_hosts.push(new_host.clone()); + } + Some(old_host) => { + if !old_host.eq(new_host) { + removed_hosts.push(old_host.clone()); + 
add_hosts.push(new_host.clone()); + } + } + } + } + + for (key, old_host) in old_urls_map.iter() { + let new_host = old_urls_map.get(key); + match new_host { + None => { + removed_hosts.push(old_host.clone()); + } + Some(_) => {} + } + } + + (removed_hosts, add_hosts) + } } impl Default for ZookeeperRegistry { @@ -248,8 +259,9 @@ impl Default for ZookeeperRegistry { } } +#[async_trait] impl Registry for ZookeeperRegistry { - fn register(&mut self, url: Url) -> Result<(), StdError> { + async fn register(&self, url: Url) -> Result<(), StdError> { debug!("register url: {}", url); let zk_path = format!( "/{}/{}/{}/{}", @@ -262,7 +274,7 @@ impl Registry for ZookeeperRegistry { Ok(()) } - fn unregister(&mut self, url: Url) -> Result<(), StdError> { + async fn unregister(&self, url: Url) -> Result<(), StdError> { let zk_path = format!( "/{}/{}/{}/{}", DUBBO_KEY, @@ -275,194 +287,155 @@ impl Registry for ZookeeperRegistry { } // for consumer to find the changes of providers - fn subscribe(&self, url: Url, listener: RegistryNotifyListener) -> Result<(), StdError> { + async fn subscribe(&self, url: Url) -> Result { let service_name = url.get_service_name(); let zk_path = format!("/{}/{}/{}", DUBBO_KEY, &service_name, PROVIDERS_KEY); - if self - .listeners - .read() - .unwrap() - .get(service_name.as_str()) - .is_some() - { - return Ok(()); - } - self.listeners - .write() - .unwrap() - .insert(service_name.to_string(), listener.clone()); + debug!("subscribe service: {}", zk_path); - let zk_listener = - self.create_listener(zk_path.clone(), service_name.to_string(), listener.clone()); + let (listener, mut change_rx) = ZooKeeperListener::new(); + let arc_listener = Arc::new(listener); - let zk_changed_paths = self.zk_client.get_children_w(&zk_path, zk_listener); - let result = match zk_changed_paths { - Err(err) => { - error!("zk subscribe error: {}", err); - Vec::new() + let watcher = ZooKeeperWatcher::new(arc_listener.clone(), zk_path.clone()); + + let (discover_tx, discover_rx) = mpsc::channel(64); + + let zk_client_in_task = self.zk_client.clone(); + let zk_path_in_task = zk_path.clone(); + let service_name_in_task = service_name.clone(); + let arc_listener_in_task = arc_listener.clone(); + tokio::spawn(async move { + let zk_client = zk_client_in_task; + let zk_path = zk_path_in_task; + let service_name = service_name_in_task; + let listener = arc_listener_in_task; + + let mut current_urls = Vec::new(); + + loop { + let changed = select! 
{ + _ = discover_tx.closed() => { + info!("discover task quit, discover channel closed"); + None + }, + changed = change_rx.recv() => { + changed + } + }; + + match changed { + Some(_) => { + let zookeeper_watcher = + ZooKeeperWatcher::new(listener.clone(), zk_path.clone()); + + match zk_client.get_children_w(&zk_path, zookeeper_watcher) { + Ok(children) => { + let (removed, add) = + ZookeeperRegistry::diff(¤t_urls, &children); + + for url in removed { + match discover_tx + .send(Ok(ServiceChange::Remove(url.clone()))) + .await + { + Ok(_) => {} + Err(e) => { + error!("send service change failed: {:?}, maybe user unsubscribe", e); + break; + } + } + } + + for url in add { + match discover_tx + .send(Ok(ServiceChange::Insert(url.clone(), ()))) + .await + { + Ok(_) => {} + Err(e) => { + error!("send service change failed: {:?}, maybe user unsubscribe", e); + break; + } + } + } + + current_urls = children; + } + Err(err) => { + error!("zk subscribe error: {}", err); + break; + } + } + } + None => { + error!("receive service change task quit, unsubscribe {}.", zk_path); + break; + } + } } - Ok(urls) => urls - .iter() - .map(|node_key| { - let provider_url: Url = urlencoding::decode(node_key) - .unwrap() - .to_string() - .as_str() - .into(); - provider_url - }) - .collect(), - }; - info!("notifying {}->{:?}", service_name, result); - listener.notify(ServiceEvent { - key: service_name, - action: String::from("ADD"), - service: result, + + debug!("unsubscribe service: {}", zk_path); }); - Ok(()) - } - fn unsubscribe(&self, url: Url, listener: RegistryNotifyListener) -> Result<(), StdError> { - todo!() + arc_listener.changed(zk_path); + + Ok(discover_rx) } -} -pub struct ServiceInstancesChangedListener { - zk_client: Arc, - path: String, - service_name: String, - listener: RegistryNotifyListener, -} + async fn unsubscribe(&self, url: Url) -> Result<(), StdError> { + let service_name = url.get_service_name(); + let zk_path = format!("/{}/{}/{}", DUBBO_KEY, &service_name, PROVIDERS_KEY); -impl Watcher for ServiceInstancesChangedListener { - fn handle(&self, event: WatchedEvent) { - if let (WatchedEventType::NodeChildrenChanged, Some(path)) = (event.event_type, event.path) - { - let event_path = path.clone(); - let dirs = self - .zk_client - .get_children(&event_path, false) - .expect("msg"); - let result: Vec = dirs - .iter() - .map(|node_key| { - let provider_url: Url = node_key.as_str().into(); - provider_url - }) - .collect(); - let res = self.zk_client.get_children_w( - &path, - ServiceInstancesChangedListener { - zk_client: Arc::clone(&self.zk_client), - path: path.clone(), - service_name: self.service_name.clone(), - listener: Arc::clone(&self.listener), - }, - ); - - info!("notify {}->{:?}", self.service_name, result); - self.listener.notify(ServiceEvent { - key: self.service_name.clone(), - action: String::from("ADD"), - service: result, - }); - } + info!("unsubscribe service: {}", zk_path); + Ok(()) } } -impl NotifyListener for ServiceInstancesChangedListener { - fn notify(&self, event: ServiceEvent) { - self.listener.notify(event); +pub struct ZooKeeperListener { + tx: mpsc::Sender, +} + +impl ZooKeeperListener { + pub fn new() -> (ZooKeeperListener, mpsc::Receiver) { + let (tx, rx) = mpsc::channel(64); + let this = ZooKeeperListener { tx }; + (this, rx) } - fn notify_all(&self, event: ServiceEvent) { - self.listener.notify(event); + pub fn changed(&self, path: String) { + match self.tx.try_send(path) { + Ok(_) => {} + Err(err) => { + error!("send change list to listener occur an error: 
{}", err); + return; + } + } } } -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use zookeeper::{Acl, CreateMode, WatchedEvent, Watcher}; - - use crate::ZookeeperRegistry; - - struct TestZkWatcher { - pub watcher: Arc>, - } +pub struct ZooKeeperWatcher { + listener: Arc, + path: String, +} - impl Watcher for TestZkWatcher { - fn handle(&self, event: WatchedEvent) { - println!("event: {:?}", event); - } +impl ZooKeeperWatcher { + pub fn new(listener: Arc, path: String) -> ZooKeeperWatcher { + ZooKeeperWatcher { listener, path } } +} - #[test] - fn zk_read_write_watcher() { - // https://github.com/bonifaido/rust-zookeeper/blob/master/examples/zookeeper_example.rs - // using ENV to set zookeeper server urls - let zkr = ZookeeperRegistry::default(); - let zk_client = zkr.get_client(); - let watcher = TestZkWatcher { - watcher: Arc::new(None), - }; - if zk_client.exists("/test", true).is_err() { - zk_client - .create( - "/test", - vec![1, 3], - Acl::open_unsafe().clone(), - CreateMode::Ephemeral, - ) - .unwrap(); - } - let zk_res = zk_client.create( - "/test", - "hello".into(), - Acl::open_unsafe().clone(), - CreateMode::Ephemeral, - ); - let result = zk_client.get_children_w("/test", watcher); - assert!(result.is_ok()); - if zk_client.exists("/test/a", true).is_err() { - zk_client.delete("/test/a", None).unwrap(); - } - if zk_client.exists("/test/a", true).is_err() { - zk_client.delete("/test/b", None).unwrap(); +impl Watcher for ZooKeeperWatcher { + fn handle(&self, event: WatchedEvent) { + info!("receive zookeeper event: {:?}", event); + let event_type: WatchedEventType = event.event_type; + match event_type { + WatchedEventType::None => { + info!("event type is none, ignore it."); + return; + } + _ => {} } - let zk_res = zk_client.create( - "/test/a", - "hello".into(), - Acl::open_unsafe().clone(), - CreateMode::Ephemeral, - ); - let zk_res = zk_client.create( - "/test/b", - "world".into(), - Acl::open_unsafe().clone(), - CreateMode::Ephemeral, - ); - let test_a_result = zk_client.get_data("/test", true); - assert!(test_a_result.is_ok()); - let vec1 = test_a_result.unwrap().0; - // data in /test should equals to "hello" - assert_eq!(String::from_utf8(vec1).unwrap(), "hello"); - zk_client.close().unwrap() - } - #[test] - fn create_path_with_parent_check() { - let zkr = ZookeeperRegistry::default(); - let path = "/du1bbo/test11111"; - let data = "hello"; - // creating a child on a not exists parent, throw a NoNode error. - // let result = zkr.create_path(path, data, CreateMode::Ephemeral); - // assert!(result.is_err()); - let create_with_parent_check_result = - zkr.create_path_with_parent_check(path, data, CreateMode::Ephemeral); - assert!(create_with_parent_check_result.is_ok()); - assert_eq!(data, zkr.get_data(path, false).unwrap()); + self.listener.changed(self.path.clone()); } }