Merged
34 changes: 17 additions & 17 deletions openml/evaluations/functions.py
@@ -15,10 +15,10 @@ def list_evaluations(
     function: str,
     offset: Optional[int] = None,
     size: Optional[int] = None,
-    id: Optional[List] = None,
     task: Optional[List] = None,
     setup: Optional[List] = None,
     flow: Optional[List] = None,
+    run: Optional[List] = None,
     uploader: Optional[List] = None,
     tag: Optional[str] = None,
Comment on lines 18 to 23

@PGijsbers (Collaborator) commented on Oct 8, 2019:

If we're going for more intuitive names, should they be changed to be plural, as they expect a list? Or perhaps even better, plural with an _id suffix (e.g. run_ids), to indicate we are looking for something that parses to an id rather than actual OpenML objects (e.g. OpenMLRun)?

Do we also want to change the type hint to Optional[List[Union[str, int]]]? It's pretty verbose, but more complete... Maybe it would be enough to include this in the docstrings.
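
For illustration, a minimal sketch of what the reviewer's suggestion might look like. The plural `_ids` names and the `Union` type hints are hypothetical — this was deferred to a follow-up issue (#804, below) rather than adopted in this PR — and only the parameters visible in this hunk are included:

```python
from typing import List, Optional, Union

# Hypothetical sketch of the review suggestion; NOT what this PR merged.
# Plural `_ids` names signal "a list of ids", and the Union type hint admits
# ints or strings that parse to ints (the code casts each via int() anyway).
def list_evaluations(
    function: str,
    offset: Optional[int] = None,
    size: Optional[int] = None,
    task_ids: Optional[List[Union[int, str]]] = None,
    setup_ids: Optional[List[Union[int, str]]] = None,
    flow_ids: Optional[List[Union[int, str]]] = None,
    run_ids: Optional[List[Union[int, str]]] = None,
    uploader_ids: Optional[List[Union[int, str]]] = None,
    tag: Optional[str] = None,
): ...
```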

Collaborator (PR author) replied:

I'd be happy to change those, but I think that is for a future PR, while this one is about renaming an argument from id to run. Shall I open an issue so we don't forget about it?

@PGijsbers (Collaborator) replied:

Just did this so I can merge & close the PR. See #804.

     per_fold: Optional[bool] = None,
@@ -38,14 +38,14 @@ def list_evaluations(
     size : int, optional
         the maximum number of runs to show

-    id : list, optional
-
     task : list, optional

     setup: list, optional

     flow : list, optional

+    run : list, optional
+
     uploader : list, optional

     tag : str, optional
@@ -78,10 +78,10 @@ def list_evaluations(
         function=function,
         offset=offset,
         size=size,
-        id=id,
         task=task,
         setup=setup,
         flow=flow,
+        run=run,
         uploader=uploader,
         tag=tag,
         sort_order=sort_order,
@@ -90,10 +90,10 @@ def list_evaluations(

 def _list_evaluations(
     function: str,
-    id: Optional[List] = None,
     task: Optional[List] = None,
     setup: Optional[List] = None,
     flow: Optional[List] = None,
+    run: Optional[List] = None,
     uploader: Optional[List] = None,
     sort_order: Optional[str] = None,
     output_format: str = 'object',
@@ -110,14 +110,14 @@ def _list_evaluations(
     function : str
         the evaluation function. e.g., predictive_accuracy

-    id : list, optional
-
     task : list, optional

     setup: list, optional

     flow : list, optional

+    run : list, optional
+
     uploader : list, optional

     kwargs: dict, optional
@@ -143,14 +143,14 @@ def _list_evaluations(
     if kwargs is not None:
         for operator, value in kwargs.items():
             api_call += "/%s/%s" % (operator, value)
-    if id is not None:
-        api_call += "/run/%s" % ','.join([str(int(i)) for i in id])
     if task is not None:
         api_call += "/task/%s" % ','.join([str(int(i)) for i in task])
     if setup is not None:
         api_call += "/setup/%s" % ','.join([str(int(i)) for i in setup])
     if flow is not None:
         api_call += "/flow/%s" % ','.join([str(int(i)) for i in flow])
+    if run is not None:
+        api_call += "/run/%s" % ','.join([str(int(i)) for i in run])
     if uploader is not None:
         api_call += "/uploader/%s" % ','.join([str(int(i)) for i in uploader])
     if sort_order is not None:
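
As an aside, a runnable sketch of how these filter segments concatenate into a REST path. The `evaluation/list/function` base path and the ids are assumptions for illustration, but the joining logic mirrors the code above — and note that before this PR, the `id` parameter already emitted a `/run/` path segment, which is the inconsistency the rename fixes:

```python
# Illustrative reconstruction of the path building above; base path and ids assumed.
task = [3573]   # hypothetical task ids
run = [12, 13]  # hypothetical run ids

api_call = "evaluation/list/function/%s" % "predictive_accuracy"
for segment, ids in (("task", task), ("run", run)):
    if ids is not None:
        # each filter appends /<name>/<comma-separated ids>, with ints enforced via int()
        api_call += "/%s/%s" % (segment, ','.join(str(int(i)) for i in ids))

print(api_call)
# evaluation/list/function/predictive_accuracy/task/3573/run/12,13
```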
@@ -256,10 +256,10 @@ def list_evaluations_setups(
     function: str,
     offset: Optional[int] = None,
     size: Optional[int] = None,
-    id: Optional[List] = None,
     task: Optional[List] = None,
     setup: Optional[List] = None,
     flow: Optional[List] = None,
+    run: Optional[List] = None,
     uploader: Optional[List] = None,
     tag: Optional[str] = None,
     per_fold: Optional[bool] = None,
@@ -279,16 +279,16 @@ def list_evaluations_setups(
         the number of runs to skip, starting from the first
     size : int, optional
         the maximum number of runs to show
-    id : list[int], optional
-        the list of evaluation ID's
     task : list[int], optional
-        the list of task ID's
+        the list of task IDs
     setup: list[int], optional
-        the list of setup ID's
+        the list of setup IDs
     flow : list[int], optional
-        the list of flow ID's
+        the list of flow IDs
+    run : list[int], optional
+        the list of run IDs
     uploader : list[int], optional
-        the list of uploader ID's
+        the list of uploader IDs
     tag : str, optional
         filter evaluation based on given tag
     per_fold : bool, optional
@@ -312,7 +312,7 @@ def list_evaluations_setups(
                          "only for single flow_id")

     # List evaluations
-    evals = list_evaluations(function=function, offset=offset, size=size, id=id, task=task,
+    evals = list_evaluations(function=function, offset=offset, size=size, run=run, task=task,
                              setup=setup, flow=flow, uploader=uploader, tag=tag,
                              per_fold=per_fold, sort_order=sort_order, output_format='dataframe')

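Taken together, a hedged before/after usage sketch of the public API change. The run ids are made up, and `output_format="dataframe"` is borrowed from the `list_evaluations_setups` call above:

```python
import openml

# Before this PR (old argument name):
#   openml.evaluations.list_evaluations("predictive_accuracy", id=[12])
# After this PR, the same filter uses the clearer `run` keyword:
evals = openml.evaluations.list_evaluations(
    function="predictive_accuracy",
    run=[12],                   # hypothetical run id(s) to filter on
    output_format="dataframe",
)
print(evals.head())
```
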
2 changes: 1 addition & 1 deletion tests/test_evaluations/test_evaluation_functions.py
@@ -97,7 +97,7 @@ def test_evaluation_list_filter_run(self):
         run_id = 12

         evaluations = openml.evaluations.list_evaluations("predictive_accuracy",
-                                                          id=[run_id])
+                                                          run=[run_id])

         self.assertEqual(len(evaluations), 1)
         for run_id in evaluations.keys():