@lmassaron
Created October 18, 2022 06:14
0-1 transformation: to make values bounded in [0, 1] approximately normal, fit a Beta distribution to the data, map the values through the fitted Beta CDF (yielding uniform probabilities), and then apply the inverse normal CDF. Running the chain in reverse (normal CDF, then Beta PPF) recovers the original values.
from scipy.stats import beta, norm
import numpy as np

# Example data bounded in [0, 1]
data = np.array([0.0, 0.0, 0.1, 0.1, 0.2, 0.4, 0.5, 0.7, 0.8, 0.8, 0.9, 1.0, 1.0, 1.0])

# Nudge exact 0s and 1s into the open interval (0, 1), where the Beta CDF/PPF are invertible
eps = 0.000001
data[data == 0.0] += eps
data[data == 1.0] -= eps

# Fit a Beta distribution, with location fixed at 0 and scale fixed at 1
a, b, loc, scale = beta.fit(data, floc=0, fscale=1)

def to_beta(data, a, b, loc, scale):
    # Beta CDF: data on [0, 1] -> uniform probabilities
    return beta.cdf(data, a=a, b=b, loc=loc, scale=scale)

def to_inv_norm(data):
    # Standard normal PPF (inverse CDF): uniform probabilities -> normal scores
    return norm.ppf(data, loc=0, scale=1)

def to_norm(data):
    # Standard normal CDF: normal scores -> uniform probabilities
    return norm.cdf(data, loc=0, scale=1)

def to_inv_beta(data, a, b, loc, scale):
    # Beta PPF (inverse CDF): uniform probabilities -> original [0, 1] scale
    return beta.ppf(data, a=a, b=b, loc=loc, scale=scale)

# Forward: nudged [0, 1] data -> approximately normal values
transformed = to_inv_norm(to_beta(data, a, b, loc, scale))
# Inverse: recover the (nudged) original values
reverted = to_inv_beta(to_norm(transformed), a, b, loc, scale)

print(transformed, reverted.round(5), data.round(5))
# also see: https://machinelearningmastery.com/empirical-distribution-function-in-python/
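
A quick sanity check is to confirm the round trip: the reverted values should match the (eps-nudged) input to the same 5-decimal precision used in the print above. A minimal check, assuming the snippet above has already been run; the tolerance is chosen to match that rounding:

# Round trip should recover the nudged input within the printed precision
assert np.allclose(data, reverted, atol=1e-5)
# Transformed values should be finite: the eps nudge keeps the normal scores off infinity
assert np.isfinite(transformed).all()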
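For comparison, scikit-learn offers a non-parametric version of the same idea: QuantileTransformer with output_distribution="normal" maps the data through the empirical CDF (as in the linked article) rather than a fitted Beta CDF, then applies the inverse normal CDF. A sketch, assuming data from the snippet above; n_quantiles=len(data) is an arbitrary choice for such a small sample:

from sklearn.preprocessing import QuantileTransformer

qt = QuantileTransformer(n_quantiles=len(data), output_distribution="normal")
# scikit-learn expects a 2D array of shape (n_samples, n_features)
gaussianized = qt.fit_transform(data.reshape(-1, 1)).ravel()
restored = qt.inverse_transform(gaussianized.reshape(-1, 1)).ravel()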