Skip to content

Commit 79ce0df

Browse files
Fix unit test
1 parent ff5c5c9 commit 79ce0df

File tree

6 files changed

+15
-12
lines changed

6 files changed

+15
-12
lines changed

autoPyTorch/api/base_task.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -749,14 +749,14 @@ def _search(
749749
running the top performing pipelines on max_budget.
750750
min_budget states the minimum resource allocation a pipeline should have
751751
so that we can compare and quickly discard bad performing models.
752-
For example, if the budget_type is epochs, and min_epochs=5, then we will
752+
For example, if the budget_type is epochs, and min_budget=5, then we will
753753
run every pipeline to a minimum of 5 epochs before performance comparison.
754754
max_budget (int):
755755
Auto-PyTorch uses `Hyperband <https://arxiv.org/abs/1603.06560>_` to
756756
trade-off resources between running many pipelines at min_budget and
757757
running the top performing pipelines on max_budget.
758758
max_budget states the maximum resource allocation a pipeline is going to
759-
be ran. For example, if the budget_type is epochs, and max_epochs=50,
759+
be ran. For example, if the budget_type is epochs, and max_budget=50,
760760
then the pipeline training will be terminated after 50 epochs.
761761
total_walltime_limit (int), (default=100): Time limit
762762
in seconds for the search of appropriate models.

autoPyTorch/api/tabular_classification.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -159,14 +159,14 @@ def search(
159159
running the top performing pipelines on max_budget.
160160
min_budget states the minimum resource allocation a pipeline should have
161161
so that we can compare and quickly discard bad performing models.
162-
For example, if the budget_type is epochs, and min_epochs=5, then we will
162+
For example, if the budget_type is epochs, and min_budget=5, then we will
163163
run every pipeline to a minimum of 5 epochs before performance comparison.
164164
max_budget (int):
165165
Auto-PyTorch uses `Hyperband <https://arxiv.org/abs/1603.06560>_` to
166166
trade-off resources between running many pipelines at min_budget and
167167
running the top performing pipelines on max_budget.
168168
max_budget states the maximum resource allocation a pipeline is going to
169-
be ran. For example, if the budget_type is epochs, and max_epochs=50,
169+
be ran. For example, if the budget_type is epochs, and max_budget=50,
170170
then the pipeline training will be terminated after 50 epochs.
171171
total_walltime_limit (int), (default=100): Time limit
172172
in seconds for the search of appropriate models.

autoPyTorch/api/tabular_regression.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -151,14 +151,14 @@ def search(
151151
running the top performing pipelines on max_budget.
152152
min_budget states the minimum resource allocation a pipeline should have
153153
so that we can compare and quickly discard bad performing models.
154-
For example, if the budget_type is epochs, and min_epochs=5, then we will
154+
For example, if the budget_type is epochs, and min_budget=5, then we will
155155
run every pipeline to a minimum of 5 epochs before performance comparison.
156156
max_budget (int):
157157
Auto-PyTorch uses `Hyperband <https://arxiv.org/abs/1603.06560>_` to
158158
trade-off resources between running many pipelines at min_budget and
159159
running the top performing pipelines on max_budget.
160160
max_budget states the maximum resource allocation a pipeline is going to
161-
be ran. For example, if the budget_type is epochs, and max_epochs=50,
161+
be ran. For example, if the budget_type is epochs, and max_budget=50,
162162
then the pipeline training will be terminated after 50 epochs.
163163
total_walltime_limit (int), (default=100): Time limit
164164
in seconds for the search of appropriate models.

autoPyTorch/optimizer/smbo.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,14 +178,14 @@ def __init__(self,
178178
running the top performing pipelines on max_budget.
179179
min_budget states the minimum resource allocation a pipeline should have
180180
so that we can compare and quickly discard bad performing models.
181-
For example, if the budget_type is epochs, and min_epochs=5, then we will
181+
For example, if the budget_type is epochs, and min_budget=5, then we will
182182
run every pipeline to a minimum of 5 epochs before performance comparison.
183183
max_budget (int):
184184
Auto-PyTorch uses `Hyperband <https://arxiv.org/abs/1603.06560>_` to
185185
trade-off resources between running many pipelines at min_budget and
186186
running the top performing pipelines on max_budget.
187187
max_budget states the maximum resource allocation a pipeline is going to
188-
be ran. For example, if the budget_type is epochs, and max_epochs=50,
188+
be ran. For example, if the budget_type is epochs, and max_budget=50,
189189
then the pipeline training will be terminated after 50 epochs.
190190
"""
191191
super(AutoMLSMBO, self).__init__()

test/test_api/test_base_api.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,6 @@ def test_set_pipeline_config():
9797
estimator = BaseTask()
9898
pipeline_options = {"device": "cuda",
9999
"budget_type": "epochs",
100-
"min_epochs": 10,
101100
"epochs": 51,
102101
"runtime": 360}
103102
estimator.set_pipeline_config(**pipeline_options)

test/test_evaluation/test_train_evaluator.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,7 @@ def tearDown(self):
8787

8888
@unittest.mock.patch('autoPyTorch.pipeline.tabular_classification.TabularClassificationPipeline')
8989
def test_holdout(self, pipeline_mock):
90+
pipeline_mock.fit_dictionary = {'budget_type': 'epochs', 'epochs': 50}
9091
# Binary iris, contains 69 train samples, 31 test samples
9192
D = get_binary_classification_datamanager()
9293
pipeline_mock.predict_proba.side_effect = \
@@ -99,7 +100,8 @@ def test_holdout(self, pipeline_mock):
99100
backend_api.load_datamanager = lambda: D
100101
queue_ = multiprocessing.Queue()
101102

102-
evaluator = TrainEvaluator(backend_api, queue_, configuration=configuration, metric=accuracy, budget=0)
103+
evaluator = TrainEvaluator(backend_api, queue_, configuration=configuration, metric=accuracy, budget=0,
104+
pipeline_config={'budget_type': 'epochs', 'epochs': 50})
103105
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
104106
evaluator.file_output.return_value = (None, {})
105107

@@ -137,7 +139,8 @@ def test_cv(self, pipeline_mock):
137139
backend_api.load_datamanager = lambda: D
138140
queue_ = multiprocessing.Queue()
139141

140-
evaluator = TrainEvaluator(backend_api, queue_, configuration=configuration, metric=accuracy, budget=0)
142+
evaluator = TrainEvaluator(backend_api, queue_, configuration=configuration, metric=accuracy, budget=0,
143+
pipeline_config={'budget_type': 'epochs', 'epochs': 50})
141144
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
142145
evaluator.file_output.return_value = (None, {})
143146

@@ -241,7 +244,8 @@ def test_predict_proba_binary_classification(self, mock):
241244
configuration = unittest.mock.Mock(spec=Configuration)
242245
queue_ = multiprocessing.Queue()
243246

244-
evaluator = TrainEvaluator(self.backend_mock, queue_, configuration=configuration, metric=accuracy, budget=0)
247+
evaluator = TrainEvaluator(self.backend_mock, queue_, configuration=configuration, metric=accuracy, budget=0,
248+
pipeline_config={'budget_type': 'epochs', 'epochs': 50})
245249

246250
evaluator.fit_predict_and_loss()
247251
Y_optimization_pred = self.backend_mock.save_numrun_to_dir.call_args_list[0][1][

0 commit comments

Comments
 (0)