The goal is to reduce memory usage while keeping the output unchanged: after any optimization, the DataFrame's hash must still equal the test hash.
What I've tried so far:
- Adding `__slots__`, but it didn't make any difference.
- Changing the default dtype from `float64` to `float32`. Although it reduces memory usage significantly, it breaks the test by changing the hash (see the sketch just below).
- Converting the data into an `np.array`, which reduced the CPU time from 13 s to 2.05 s but didn't affect memory usage.
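
For context on why `float32` breaks the hash: `pd.util.hash_pandas_object` hashes the raw bit patterns of the values, so a dtype change alters the digest even for values that survive the cast exactly. A minimal sketch of that behavior (the values here are just illustrative):

```python
import numpy as np
import pandas as pd

s64 = pd.Series(np.array([0.5, 1.5, 2.5], dtype=np.float64))
s32 = s64.astype(np.float32)  # same values, exactly representable in float32

# Per-row hashes differ because the underlying bytes differ,
# so any sha256 digest computed over them differs too.
h64 = pd.util.hash_pandas_object(s64, index=True).values
h32 = pd.util.hash_pandas_object(s32, index=True).values
print((h64 == h32).any())  # expected: False
```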
The code to reproduce:
```python
import hashlib
import random
import tracemalloc
import typing as tp

import numpy as np
import pandas as pd

rows = 40000000
trs = 10
random.seed(42)
generated_data: tp.List[float] = np.array([random.random() for _ in range(rows)])


def df_upd(df_initial: pd.DataFrame, df_new: pd.DataFrame) -> pd.DataFrame:
    return pd.concat((df_initial, df_new), axis=1)


class T:
    """adding a column of random data"""
    __slots__ = ['var']

    def __init__(self, var: str):
        self.var = var

    def transform(self, df_initial: pd.DataFrame) -> pd.DataFrame:
        return df_upd(df_initial, pd.DataFrame({self.var: generated_data}))


class Pipeline:
    __slots__ = ['df', 'transforms']

    def __init__(self):
        self.df = pd.DataFrame()
        self.transforms = np.array([T(f"v{i}") for i in range(trs)])

    def run(self):
        for t in self.transforms:
            self.df = t.transform(self.df)
        return self.df


if __name__ == "__main__":
    # starting the monitoring
    tracemalloc.start()

    # function call (%time is IPython magic, so run this in a notebook)
    pipe = Pipeline()
    %time df = pipe.run()
    print("running")

    # displaying the memory
    current, peak = tracemalloc.get_traced_memory()
    print(
        f"Current memory usage is {current / 10**3} KB ({current / 10**6} MB); "
        f"Peak was {peak / 10**3} KB ({peak / 10**6} MB); "
        f"Diff = {(peak - current) / 10**3} KB ({(peak - current) / 10**6} MB)"
    )

    # stopping the library
    tracemalloc.stop()

    # should stay unchanged (test_hash is the expected digest, defined elsewhere)
    %time hashed_df = hashlib.sha256(pd.util.hash_pandas_object(df, index=True).values).hexdigest()
    print("hashed_df", hashed_df)
    assert hashed_df == test_hash
    print("Success!")
```
Side note: the annotation `generated_data: tp.List[float]` should probably be closer to `generated_data: np.ndarray`, since `np.array(...)` returns an ndarray, not a list.
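
For what it's worth, the repeated `pd.concat` in `run` is the likely driver of the peak: each iteration copies everything accumulated so far into a brand-new frame, so near the end the old and new frames coexist and peak memory approaches twice the final size. Below is a hedged sketch of building the frame in a single constructor call instead; assuming the hash depends only on the values, column order, and index (not on how the frame was assembled), the digest should come out the same, since the values stay `float64`:

```python
import random
import numpy as np
import pandas as pd

rows = 40000000
trs = 10
random.seed(42)
generated_data = np.array([random.random() for _ in range(rows)])

# One constructor call: same values in every column, same dtype
# (float64), same column names v0..v9 in the same order, and the
# default RangeIndex, so hash_pandas_object should produce the same
# per-row hashes as the concat-based Pipeline.
df = pd.DataFrame({f"v{i}": generated_data for i in range(trs)})
```

This avoids the chain of intermediate copies; whether it fits the real pipeline depends on whether each transform genuinely needs to see the frame accumulated so far.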