Commit

CI: Update docs for refs/heads/main (e0dcb72)
github-actions[bot] committed Feb 14, 2024
1 parent e9e89b5 commit 8683dd0
Showing 3 changed files with 128 additions and 41 deletions.
104 changes: 77 additions & 27 deletions docs/features/feature_collection.html
@@ -600,11 +600,15 @@ <h1 class="title">Module <code>tsflex.features.feature_collection</code></h1>
"""
global group_indices, group_id_name, get_group_func
group_indices = grouped_data.indices # dict - group_id as key; indices as value
-group_id_name = grouped_data.grouper.names # name of the group col(s)
+# since in future versions of pandas grouper will be deprecated
+group_attr = "_grouper" if hasattr(grouped_data, "_grouper") else "grouper"
+group_id_name = getattr(grouped_data, group_attr).names # name of group col(s)
get_group_func = self._group_feat_generator(grouped_data)

+# sort_output_index can be set to False, since we want to keep the same order as
+# the group_indices
return self._calculate_feature_list(
-self._executor_grouped, n_jobs, show_progress, return_df, f_handler
+self._executor_grouped, n_jobs, show_progress, return_df, False, f_handler
)

@staticmethod
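
The getattr fallback introduced above is needed because recent pandas releases renamed the internal DataFrameGroupBy attribute `grouper` to `_grouper` and deprecated the public name. A minimal sketch of the same compatibility shim on toy data (the DataFrame and column names here are illustrative, not from tsflex):

    import pandas as pd

    df = pd.DataFrame({"store": ["a", "a", "b"], "sales": [1, 2, 3]})
    grouped = df.groupby("store")

    # Newer pandas exposes the grouper as the private `_grouper`; older
    # versions only have the (since-deprecated) public `grouper` attribute.
    group_attr = "_grouper" if hasattr(grouped, "_grouper") else "grouper"
    print(getattr(grouped, group_attr).names)  # ['store']
    print(grouped.indices)  # {'a': array([0, 1]), 'b': array([2])}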
@@ -756,7 +760,6 @@ <h1 class="title">Module <code>tsflex.features.feature_collection</code></h1>
calc_result["__end"] = consecutive_grouped_by_df["end"]

if return_df:
-# concatenate rows
return calc_result
else:
return [calc_result[col] for col in calc_result.columns]
@@ -795,24 +798,28 @@ <h1 class="title">Module <code>tsflex.features.feature_collection</code></h1>
def _calculate_feature_list(
self,
executor: Callable[[int], pd.DataFrame],
-n_jobs: Optional[int],
-show_progress: Optional[bool],
-return_df: Optional[bool],
-f_handler: Optional[logging.FileHandler],
+n_jobs: Union[int, None],
+show_progress: bool,
+return_df: bool,
+sort_output_index: bool,
+f_handler: logging.FileHandler,
) -> Union[List[pd.DataFrame], pd.DataFrame]:
"""Calculate the features for the given executor.

Parameters
----------
executor : Callable[[int], pd.DataFrame]
The executor function that will be used to calculate the features.
-n_jobs : Optional[int], optional
+n_jobs : Union[int, None]
The number of jobs to run in parallel.
-show_progress : Optional[bool], optional
+show_progress : bool
Whether to show a progress bar.
-return_df : Optional[bool], optional
+return_df : bool
Whether to return a DataFrame or a list of DataFrames.
-f_handler : Optional[logging.FileHandler], optional
+sort_output_index : bool
+Whether to sort the output index. Note that this is only relevant when
+`return_df` is set to `True`.
+f_handler : logging.FileHandler
The file handler that is used to log the function execution times.

Returns
@@ -865,7 +872,13 @@ <h1 class="title">Module <code>tsflex.features.feature_collection</code></h1>

if return_df:
# Concatenate & sort the columns
-df = pd.concat(calculated_feature_list, axis=1, join="outer", copy=False)
+df = pd.concat(
+calculated_feature_list,
+axis=1,
+join="outer",
+copy=False,
+sort=sort_output_index,
+)
return df.reindex(sorted(df.columns), axis=1)
else:
return calculated_feature_list
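
The `sort` keyword that this change threads into pd.concat decides whether the union of the per-feature row indexes is sorted when the outputs are joined column-wise. A hedged toy example of that flag's effect (values invented for illustration):

    import pandas as pd

    s1 = pd.Series([1, 2], index=[30, 10], name="f1")
    s2 = pd.Series([3], index=[20], name="f2")

    # sort=False keeps index values in order of first appearance;
    # sort=True sorts the non-concatenation (row) axis of the outer join.
    print(pd.concat([s1, s2], axis=1, join="outer", sort=False).index.tolist())  # [30, 10, 20]
    print(pd.concat([s1, s2], axis=1, join="outer", sort=True).index.tolist())   # [10, 20, 30]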
@@ -1201,6 +1214,9 @@ <h1 class="title">Module <code>tsflex.features.feature_collection</code></h1>
f_handler=f_handler,
)

+# Sort output index if segment indices are not provided
+sort_output_index = segment_start_idxs is None and segment_end_idxs is None

# Convert to numpy array (if necessary)
if segment_start_idxs is not None:
segment_start_idxs = FeatureCollection._process_segment_idxs(
@@ -1282,7 +1298,12 @@ <h1 class="title">Module <code>tsflex.features.feature_collection</code></h1>
)

return self._calculate_feature_list(
-self._executor_stroll, n_jobs, show_progress, return_df, f_handler
+self._executor_stroll,
+n_jobs,
+show_progress,
+return_df,
+sort_output_index,
+f_handler,
)
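
As the comment added earlier notes, sort_output_index is only True in the default windowed case; when the caller passes explicit segment indices, their (possibly unsorted) order is preserved. Roughly, with illustrative values only:

    # The caller supplies segment boundaries in a deliberate, non-sorted order ...
    segment_start_idxs, segment_end_idxs = [40, 10], [50, 20]
    # ... so the output must keep that order and should not be re-sorted.
    sort_output_index = segment_start_idxs is None and segment_end_idxs is None  # False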

def serialize(self, file_path: Union[str, Path]):
@@ -1937,11 +1958,15 @@ <h2 class="section-title" id="header-classes">Classes</h2>
"""
global group_indices, group_id_name, get_group_func
group_indices = grouped_data.indices # dict - group_id as key; indices as value
-group_id_name = grouped_data.grouper.names # name of the group col(s)
+# since in future versions of pandas grouper will be deprecated
+group_attr = "_grouper" if hasattr(grouped_data, "_grouper") else "grouper"
+group_id_name = getattr(grouped_data, group_attr).names # name of group col(s)
get_group_func = self._group_feat_generator(grouped_data)

+# sort_output_index can be set to False, since we want to keep the same order as
+# the group_indices
return self._calculate_feature_list(
-self._executor_grouped, n_jobs, show_progress, return_df, f_handler
+self._executor_grouped, n_jobs, show_progress, return_df, False, f_handler
)

@staticmethod
@@ -2093,7 +2118,6 @@ <h2 class="section-title" id="header-classes">Classes</h2>
calc_result["__end"] = consecutive_grouped_by_df["end"]

if return_df:
-# concatenate rows
return calc_result
else:
return [calc_result[col] for col in calc_result.columns]
@@ -2132,24 +2156,28 @@ <h2 class="section-title" id="header-classes">Classes</h2>
def _calculate_feature_list(
self,
executor: Callable[[int], pd.DataFrame],
-n_jobs: Optional[int],
-show_progress: Optional[bool],
-return_df: Optional[bool],
-f_handler: Optional[logging.FileHandler],
+n_jobs: Union[int, None],
+show_progress: bool,
+return_df: bool,
+sort_output_index: bool,
+f_handler: logging.FileHandler,
) -> Union[List[pd.DataFrame], pd.DataFrame]:
"""Calculate the features for the given executor.

Parameters
----------
executor : Callable[[int], pd.DataFrame]
The executor function that will be used to calculate the features.
-n_jobs : Optional[int], optional
+n_jobs : Union[int, None]
The number of jobs to run in parallel.
-show_progress : Optional[bool], optional
+show_progress : bool
Whether to show a progress bar.
-return_df : Optional[bool], optional
+return_df : bool
Whether to return a DataFrame or a list of DataFrames.
-f_handler : Optional[logging.FileHandler], optional
+sort_output_index : bool
+Whether to sort the output index. Note that this is only relevant when
+`return_df` is set to `True`.
+f_handler : logging.FileHandler
The file handler that is used to log the function execution times.

Returns
@@ -2202,7 +2230,13 @@ <h2 class="section-title" id="header-classes">Classes</h2>

if return_df:
# Concatenate & sort the columns
-df = pd.concat(calculated_feature_list, axis=1, join="outer", copy=False)
+df = pd.concat(
+calculated_feature_list,
+axis=1,
+join="outer",
+copy=False,
+sort=sort_output_index,
+)
return df.reindex(sorted(df.columns), axis=1)
else:
return calculated_feature_list
@@ -2538,6 +2572,9 @@ <h2 class="section-title" id="header-classes">Classes</h2>
f_handler=f_handler,
)

+# Sort output index if segment indices are not provided
+sort_output_index = segment_start_idxs is None and segment_end_idxs is None

# Convert to numpy array (if necessary)
if segment_start_idxs is not None:
segment_start_idxs = FeatureCollection._process_segment_idxs(
@@ -2619,7 +2656,12 @@ <h2 class="section-title" id="header-classes">Classes</h2>
)

return self._calculate_feature_list(
-self._executor_stroll, n_jobs, show_progress, return_df, f_handler
+self._executor_stroll,
+n_jobs,
+show_progress,
+return_df,
+sort_output_index,
+f_handler,
)

def serialize(self, file_path: Union[str, Path]):
@@ -3234,6 +3276,9 @@ <h2 id="raises">Raises</h2>
f_handler=f_handler,
)

+# Sort output index if segment indices are not provided
+sort_output_index = segment_start_idxs is None and segment_end_idxs is None

# Convert to numpy array (if necessary)
if segment_start_idxs is not None:
segment_start_idxs = FeatureCollection._process_segment_idxs(
@@ -3315,7 +3360,12 @@ <h2 id="raises">Raises</h2>
)

return self._calculate_feature_list(
-self._executor_stroll, n_jobs, show_progress, return_df, f_handler
+self._executor_stroll,
+n_jobs,
+show_progress,
+return_df,
+sort_output_index,
+f_handler,
)</code></pre>
</details>
<div class="desc"><p>Calculate features on the passed data.</p>
57 changes: 43 additions & 14 deletions docs/features/index.html
@@ -1638,11 +1638,15 @@ <h2 id="note">Note</h2>
"""
global group_indices, group_id_name, get_group_func
group_indices = grouped_data.indices # dict - group_id as key; indices as value
-group_id_name = grouped_data.grouper.names # name of the group col(s)
+# since in future versions of pandas grouper will be deprecated
+group_attr = "_grouper" if hasattr(grouped_data, "_grouper") else "grouper"
+group_id_name = getattr(grouped_data, group_attr).names # name of group col(s)
get_group_func = self._group_feat_generator(grouped_data)

+# sort_output_index can be set to False, since we want to keep the same order as
+# the group_indices
return self._calculate_feature_list(
-self._executor_grouped, n_jobs, show_progress, return_df, f_handler
+self._executor_grouped, n_jobs, show_progress, return_df, False, f_handler
)

@staticmethod
@@ -1794,7 +1798,6 @@ <h2 id="note">Note</h2>
calc_result["__end"] = consecutive_grouped_by_df["end"]

if return_df:
-# concatenate rows
return calc_result
else:
return [calc_result[col] for col in calc_result.columns]
@@ -1833,24 +1836,28 @@ <h2 id="note">Note</h2>
def _calculate_feature_list(
self,
executor: Callable[[int], pd.DataFrame],
-n_jobs: Optional[int],
-show_progress: Optional[bool],
-return_df: Optional[bool],
-f_handler: Optional[logging.FileHandler],
+n_jobs: Union[int, None],
+show_progress: bool,
+return_df: bool,
+sort_output_index: bool,
+f_handler: logging.FileHandler,
) -> Union[List[pd.DataFrame], pd.DataFrame]:
"""Calculate the features for the given executor.

Parameters
----------
executor : Callable[[int], pd.DataFrame]
The executor function that will be used to calculate the features.
-n_jobs : Optional[int], optional
+n_jobs : Union[int, None]
The number of jobs to run in parallel.
-show_progress : Optional[bool], optional
+show_progress : bool
Whether to show a progress bar.
-return_df : Optional[bool], optional
+return_df : bool
Whether to return a DataFrame or a list of DataFrames.
-f_handler : Optional[logging.FileHandler], optional
+sort_output_index : bool
+Whether to sort the output index. Note that this is only relevant when
+`return_df` is set to `True`.
+f_handler : logging.FileHandler
The file handler that is used to log the function execution times.

Returns
@@ -1903,7 +1910,13 @@ <h2 id="note">Note</h2>

if return_df:
# Concatenate & sort the columns
-df = pd.concat(calculated_feature_list, axis=1, join="outer", copy=False)
+df = pd.concat(
+calculated_feature_list,
+axis=1,
+join="outer",
+copy=False,
+sort=sort_output_index,
+)
return df.reindex(sorted(df.columns), axis=1)
else:
return calculated_feature_list
@@ -2239,6 +2252,9 @@ <h2 id="note">Note</h2>
f_handler=f_handler,
)

+# Sort output index if segment indices are not provided
+sort_output_index = segment_start_idxs is None and segment_end_idxs is None

# Convert to numpy array (if necessary)
if segment_start_idxs is not None:
segment_start_idxs = FeatureCollection._process_segment_idxs(
@@ -2320,7 +2336,12 @@ <h2 id="note">Note</h2>
)

return self._calculate_feature_list(
-self._executor_stroll, n_jobs, show_progress, return_df, f_handler
+self._executor_stroll,
+n_jobs,
+show_progress,
+return_df,
+sort_output_index,
+f_handler,
)

def serialize(self, file_path: Union[str, Path]):
@@ -2935,6 +2956,9 @@ <h2 id="raises">Raises</h2>
f_handler=f_handler,
)

+# Sort output index if segment indices are not provided
+sort_output_index = segment_start_idxs is None and segment_end_idxs is None

# Convert to numpy array (if necessary)
if segment_start_idxs is not None:
segment_start_idxs = FeatureCollection._process_segment_idxs(
@@ -3016,7 +3040,12 @@ <h2 id="raises">Raises</h2>
)

return self._calculate_feature_list(
-self._executor_stroll, n_jobs, show_progress, return_df, f_handler
+self._executor_stroll,
+n_jobs,
+show_progress,
+return_df,
+sort_output_index,
+f_handler,
)</code></pre>
</details>
<div class="desc"><p>Calculate features on the passed data.</p>
8 changes: 8 additions & 0 deletions docs/features/segmenter/strided_rolling.html
@@ -626,6 +626,10 @@ <h1 class="title">Module <code>tsflex.features.segmenter.strided_rolling</code></h1>
) -> pd.Index:
assert start_idxs.dtype.type == np.datetime64
assert end_idxs.dtype.type == np.datetime64
+if not len(start_idxs): # to fix "datetime64 values must have a unit specified"
+    assert not len(end_idxs)
+    start_idxs = start_idxs.astype("datetime64[ns]")
+    end_idxs = end_idxs.astype("datetime64[ns]")
start_idxs = pd.to_datetime(start_idxs, utc=True).tz_convert(self._tz_index)
end_idxs = pd.to_datetime(end_idxs, utc=True).tz_convert(self._tz_index)
return super()._get_output_index(start_idxs, end_idxs, name)
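
The added empty-array guard works around the fact that recent pandas refuses to convert a unit-less datetime64 array, so empty inputs are first cast to nanosecond precision. A small sketch of the failure mode being avoided (assumes a recent numpy/pandas):

    import numpy as np
    import pandas as pd

    empty = np.array([], dtype="datetime64")  # generic datetime64, no unit
    # pd.to_datetime(empty)  # raises: "datetime64 values must have a unit specified"
    idx = pd.to_datetime(empty.astype("datetime64[ns]"), utc=True)
    print(idx)  # DatetimeIndex([], dtype='datetime64[ns, UTC]', freq=None)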
@@ -1803,6 +1807,10 @@ <h3>Methods</h3>
) -> pd.Index:
assert start_idxs.dtype.type == np.datetime64
assert end_idxs.dtype.type == np.datetime64
+if not len(start_idxs): # to fix "datetime64 values must have a unit specified"
+    assert not len(end_idxs)
+    start_idxs = start_idxs.astype("datetime64[ns]")
+    end_idxs = end_idxs.astype("datetime64[ns]")
start_idxs = pd.to_datetime(start_idxs, utc=True).tz_convert(self._tz_index)
end_idxs = pd.to_datetime(end_idxs, utc=True).tz_convert(self._tz_index)
return super()._get_output_index(start_idxs, end_idxs, name)
