Compare commits
2 Commits
abfc877c7d...0471cb0ab8

| Author | SHA1 | Date |
|---|---|---|
| | 0471cb0ab8 | |
| | e05ccdabb9 | |
```diff
@@ -7,14 +7,11 @@ authors = ["coolneng <akasroua@gmail.com>"]
 [tool.poetry.dependencies]
 python = "^3.8"
 scikit-learn = "^0.24.0"
-pandas = "^1.2.0"
 imbalanced-learn = "^0.7.0"
+numpy = "^1.19.4"

 [tool.poetry.dev-dependencies]

 [build-system]
 requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"
-
-[tool.poetry.scripts]
-competition = "processing:main"
```
```diff
@@ -2,4 +2,4 @@

 with pkgs;

-mkShell { buildInputs = [ python38 poetry ]; }
+mkShell { buildInputs = [ python38 python38Packages.pandas poetry ]; }
```
```diff
@@ -21,39 +21,33 @@ def rename_columns(df_list) -> DataFrame:

 def drop_null_values(df_list):
     for df in df_list:
-        df.dropna(inplace=True)
         df.drop(columns="tipo_marchas", inplace=True)
-        df["descuento"].fillna(0)
-    return df_list
-
-
-def trim_column_names(df_list) -> DataFrame:
-    columns = ["consumo", "motor_CC", "potencia"]
-    for df in df_list:
-        for col in columns:
-            df[col] = df[col].str.replace(pat="[^.0-9]", repl="").astype(float)
+        df.drop(columns="descuento", inplace=True)
+        df.dropna(inplace=True)
     return df_list


 def encode_columns(df_list):
     label_encoder = LabelEncoder()
     files = [
-        "ao"
-        "asientos"
-        "ciudad"
-        "combustible"
-        "consumo"
-        "descuento"
-        "kilometros"
-        "mano"
-        "motor_cc"
-        "nombre"
-        "potencia"
+        "ao",
+        "asientos",
+        "ciudad",
+        "combustible",
+        "consumo",
+        "kilometros",
+        "mano",
+        "motor_cc",
+        "nombre",
+        "potencia",
     ]
     for data in files:
         for df in df_list:
             label = label_encoder.fit(read_csv("data/" + data + ".csv", squeeze=True))
-            df[data] = label.transform(df[data])
+            if data == "ao":
+                df["año"] = label.transform(df["año"])
+            else:
+                df[data] = label.transform(df[data])
     return df_list

```
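For readability, here is encode_columns as it reads after this change, assembled from the hunk above; the imports are assumptions (LabelEncoder from scikit-learn, read_csv from pandas), since they sit outside the diff context.

```python
# Sketch of encode_columns after this commit, pieced together from the hunk above.
# The imports below are assumed; they are not shown in the diff.
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder


def encode_columns(df_list):
    label_encoder = LabelEncoder()
    files = [
        "ao",
        "asientos",
        "ciudad",
        "combustible",
        "consumo",
        "kilometros",
        "mano",
        "motor_cc",
        "nombre",
        "potencia",
    ]
    for data in files:
        for df in df_list:
            # Fit the encoder on the reference column stored in data/<name>.csv;
            # squeeze=True makes read_csv return a Series for a single-column file.
            label = label_encoder.fit(read_csv("data/" + data + ".csv", squeeze=True))
            # The "ao" reference file corresponds to the "año" dataframe column.
            if data == "ao":
                df["año"] = label.transform(df["año"])
            else:
                df[data] = label.transform(df[data])
    return df_list
```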
```diff
@@ -72,8 +66,10 @@ def balance_training_data(df):
     smote_tomek = SMOTETomek(random_state=42)
     data, target = split_data_target(df=df, dataset="data")
     balanced_data, balanced_target = smote_tomek.fit_resample(data, target)
-    balanced_data_df = DataFrame(balanced_data, columns=data.columns)
-    balanced_target_df = DataFrame(balanced_target, columns=target.columns)
+    balanced_data_df = DataFrame(
+        balanced_data, columns=df.columns.difference(["precio_cat"])
+    )
+    balanced_target_df = DataFrame(balanced_target, columns=["precio_cat"])
     return balanced_data_df, balanced_target_df

```
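The rewritten balance_training_data now names the columns explicitly when it rebuilds the resampled frames, since fit_resample does not necessarily return labelled DataFrames. A standalone sketch of that pattern with made-up toy data (the column and class values below are illustrative, not taken from the project):

```python
# Toy illustration of the SMOTETomek resampling pattern used above.
from imblearn.combine import SMOTETomek
from pandas import DataFrame

data = DataFrame(
    {
        "kilometros": list(range(16)),
        "potencia": [2 * v for v in range(16)],
    }
)
# Imbalanced target: ten samples of class 0, six of class 1
# (SMOTE's default k_neighbors=5 needs at least six minority samples).
target = [0] * 10 + [1] * 6

smote_tomek = SMOTETomek(random_state=42)
balanced_data, balanced_target = smote_tomek.fit_resample(data, target)

# Rebuild labelled frames, mirroring the new code in the hunk above.
balanced_data_df = DataFrame(balanced_data, columns=data.columns)
balanced_target_df = DataFrame(balanced_target, columns=["precio_cat"])
```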
```diff
@@ -86,8 +82,7 @@ def parse_data(train, test):
     df_list = construct_dataframes(train=train, test=test)
     renamed_df_list = rename_columns(df_list)
     processed_df_list = drop_null_values(renamed_df_list)
-    numeric_df_list = trim_column_names(processed_df_list)
-    encoded_df_list = encode_columns(numeric_df_list)
+    encoded_df_list = encode_columns(processed_df_list)
     train_data, train_target = balance_training_data(encoded_df_list[0])
     test_data, test_ids = split_data_target(encoded_df_list[1], dataset="test")
     return train_data, train_target, test_data, test_ids
```
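For context, a hypothetical call site for the updated parse_data; the CSV paths and the surrounding script are assumptions, not part of this compare.

```python
# Hypothetical usage of parse_data after this change; the paths are made up.
train_data, train_target, test_data, test_ids = parse_data(
    train="data/train.csv", test="data/test.csv"
)
```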