#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from pipeline.backend.config import Backend, WorkMode
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, Intersection, HeteroSecureBoost, Evaluation
from pipeline.interface import Data
from pipeline.runtime.entity import JobParameters
# table name & namespace in data storage
# data should be uploaded before running the modeling task (a sketch of the upload step follows the table definitions below)
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
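# If the tables above do not exist yet, they can be created with a separate upload pipeline.
# The commented snippet below is only a minimal sketch of FATE's pipeline upload API
# (add_upload_data / upload); the csv paths and partition count are illustrative and may need
# adjusting for your data location and FATE version. In standalone mode both tables can be
# uploaded from the same machine; in cluster mode each party uploads its own data.
# pipeline_upload = PipeLine().set_initiator(role="guest", party_id=9999).set_roles(guest=9999)
# pipeline_upload.add_upload_data(file="examples/data/breast_hetero_guest.csv",
#                                 table_name=guest_train_data["name"],
#                                 namespace=guest_train_data["namespace"],
#                                 head=1, partition=4)
# pipeline_upload.add_upload_data(file="examples/data/breast_hetero_host.csv",
#                                 table_name=host_train_data["name"],
#                                 namespace=host_train_data["namespace"],
#                                 head=1, partition=4)
# pipeline_upload.upload(drop=1)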
# initialize pipeline
# Party ids identify the parties involved in federated learning. In standalone mode,
# arbitrary integers can be used as party ids.
pipeline = PipeLine().set_initiator(role="guest", party_id=9999).set_roles(guest=9999, host=10000)
# define components
# Reader is a component that obtains the uploaded data. This component is needed in almost every job.
reader_0 = Reader(name="reader_0")
# In the following way, you can set different parameters for different parties.
reader_0.get_party_instance(role="guest", party_id=9999).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=10000).component_param(table=host_train_data)
# DataTransform provides preprocessing for the raw data, including label extraction, data format
# conversion, missing-value filling and so on. You may refer to the algorithm list doc for more details.
data_transform_0 = DataTransform(name="data_transform_0", with_label=True)
data_transform_0.get_party_instance(role="host", party_id=10000).component_param(with_label=False)
# Perform PSI (private set intersection) for the hetero scenario.
intersect_0 = Intersection(name="intersection_0")
# Define a hetero-secureboost component. The following parameters will be set for all parties involved.
hetero_secureboost_0 = HeteroSecureBoost(name="hetero_secureboost_0",
                                         num_trees=5,
                                         bin_num=16,
                                         task_type="classification",
                                         objective_param={"objective": "cross_entropy"},
                                         encrypt_param={"method": "iterativeAffine"},
                                         tree_param={"max_depth": 3})
# To show the evaluation result, an "Evaluation" component is needed.
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
# add components to pipeline, in order of task execution
# Components are connected by passing an upstream component's data output as their input.
# Typically, a feature engineering component takes its input as "data", while
# a modeling component takes "train_data". Please note the difference between the
# hetero_secureboost_0 input and the inputs of the other components below.
# This is just a simple example; for more details on other components, please check
# out the examples in "examples/pipeline/{component you are interested in}".
pipeline.add_component(reader_0)\
    .add_component(data_transform_0, data=Data(data=reader_0.output.data))\
    .add_component(intersect_0, data=Data(data=data_transform_0.output.data))\
    .add_component(hetero_secureboost_0, data=Data(train_data=intersect_0.output.data))\
    .add_component(evaluation_0, data=Data(data=hetero_secureboost_0.output.data))
# compile & fit pipeline
pipeline.compile().fit(JobParameters(backend=Backend.EGGROLL, work_mode=WorkMode.STANDALONE))
# To run this task with a cluster deployment, use the following setting instead.
# You may change the data engine backend according to the actual environment.
# pipeline.compile().fit(JobParameters(backend=Backend.EGGROLL, work_mode=WorkMode.CLUSTER))
# query component summary
print(f"Evaluation summary:\n{json.dumps(pipeline.get_component('evaluation_0').get_summary(), indent=4)}")