mirror of
https://github.com/jung-geun/PSO.git
synced 2025-12-19 20:44:39 +09:00
Commit message summarizing the code changes.
72 test/bean.py Normal file
@@ -0,0 +1,72 @@
import os

from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow import keras
from ucimlrepo import fetch_ucirepo

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"


def make_model():
    model = Sequential()
    model.add(Dense(12, input_dim=16, activation="relu"))
    model.add(Dense(8, activation="relu"))
    model.add(Dense(7, activation="softmax"))

    return model


def get_data():
    # fetch the Dry Bean dataset (UCI repository id 602)
    dry_bean_dataset = fetch_ucirepo(id=602)

    # data (as pandas dataframes)
    X = dry_bean_dataset.data.features
    y = dry_bean_dataset.data.targets

    x = X.to_numpy().astype("float32")

    # the targets are bean-variety names (strings), so encode them as
    # integers before one-hot encoding
    y_encoded = LabelEncoder().fit_transform(y.values.ravel())
    y_class = to_categorical(y_encoded)

    # metadata and variable information are available via
    # dry_bean_dataset.metadata and dry_bean_dataset.variables

    x_train, x_test, y_train, y_test = train_test_split(
        x, y_class, test_size=0.2, random_state=42, shuffle=True
    )
    return x_train, x_test, y_train, y_test


x_train, x_test, y_train, y_test = get_data()
model = make_model()

# fit() below receives no validation data, so monitor the training loss
early_stopping = keras.callbacks.EarlyStopping(
    monitor="loss", patience=10, min_delta=0.001, restore_best_weights=True
)

# the labels are one-hot encoded, so the loss must be the categorical
# (not sparse) crossentropy
model.compile(
    loss="categorical_crossentropy",
    optimizer="adam",
    metrics=["accuracy", "mse"],
)

model.summary()

history = model.fit(
    x_train, y_train, epochs=150, batch_size=10, callbacks=[early_stopping]
)
score = model.evaluate(x_test, y_test, verbose=2)
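Since fit() above receives no validation data, early stopping can only watch the training loss. A minimal variant, reusing the names from the script above, that holds out part of the training set so the callback can monitor validation loss instead — a sketch of one option, not part of the committed file:

# Variant (not in the committed script): hold out 20% of the training data
# so EarlyStopping can monitor validation loss.
early_stopping = keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=10, min_delta=0.001, restore_best_weights=True
)
history = model.fit(
    x_train,
    y_train,
    epochs=150,
    batch_size=10,
    validation_split=0.2,
    callbacks=[early_stopping],
)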
71 test/digits.py Normal file
@@ -0,0 +1,71 @@
import os
import sys

from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

from pso import optimizer

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


def make_model():
    model = Sequential()
    model.add(Dense(12, input_dim=64, activation="relu"))
    model.add(Dense(10, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    return model


def get_data():
    digits = load_digits()
    X = digits.data
    y = digits.target

    x = X.astype("float32")

    y_class = to_categorical(y)

    x_train, x_test, y_train, y_test = train_test_split(
        x, y_class, test_size=0.2, random_state=42, shuffle=True
    )
    return x_train, x_test, y_train, y_test


x_train, x_test, y_train, y_test = get_data()
model = make_model()

digits_pso = optimizer(
    model,
    loss="categorical_crossentropy",
    n_particles=300,
    c0=0.5,
    c1=0.3,
    w_min=0.2,
    w_max=0.9,
    negative_swarm=0,
    mutation_swarm=0.1,
    convergence_reset=True,
    convergence_reset_patience=10,
    convergence_reset_monitor="loss",
    convergence_reset_min_delta=0.001,
)

digits_pso.fit(
    x_train,
    y_train,
    epochs=500,
    validate_data=(x_test, y_test),
    log=2,
    save_info=True,
    renewal="loss",
    log_name="digits",
)

print("Done!")

sys.exit(0)
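The c0/c1/w_min/w_max arguments above are the standard particle-swarm coefficients. This diff does not show the pso package's internals, so the following NumPy sketch illustrates only the textbook update those names usually denote (cognitive weight c0, social weight c1, inertia decayed linearly from w_max to w_min) on a toy objective; it is not the library's implementation:

# Generic PSO sketch (NOT the pso package's code): minimize f(p) = sum(p**2).
import numpy as np

def sphere(p):
    return np.sum(p**2, axis=1)

rng = np.random.default_rng(42)
n_particles, dim, epochs = 30, 5, 100
c0, c1, w_min, w_max = 0.5, 0.3, 0.2, 0.9

pos = rng.uniform(-1, 1, (n_particles, dim))
vel = np.zeros_like(pos)
pbest = pos.copy()
pbest_val = sphere(pos)
gbest = pbest[pbest_val.argmin()]

for epoch in range(epochs):
    w = w_max - (w_max - w_min) * epoch / epochs  # inertia decays linearly
    r0, r1 = rng.random(pos.shape), rng.random(pos.shape)
    vel = w * vel + c0 * r0 * (pbest - pos) + c1 * r1 * (gbest - pos)
    pos += vel
    val = sphere(pos)
    improved = val < pbest_val
    pbest[improved], pbest_val[improved] = pos[improved], val[improved]
    gbest = pbest[pbest_val.argmin()]

print(f"best value after {epochs} epochs: {pbest_val.min():.6f}")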
81 test/digits_tf.py Normal file
@@ -0,0 +1,81 @@
import os
import sys

import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical


gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
    try:
        tf.config.experimental.set_memory_growth(gpus[0], True)
    except RuntimeError as r:
        print(r)

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


def make_model():
    model = Sequential()
    model.add(Dense(12, input_dim=64, activation="relu"))
    model.add(Dense(12, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    return model


def get_data():
    digits = load_digits()
    X = digits.data
    y = digits.target

    x = X.astype("float32")

    y_class = to_categorical(y)

    x_train, x_test, y_train, y_test = train_test_split(
        x, y_class, test_size=0.2, random_state=42, shuffle=True
    )
    return x_train, x_test, y_train, y_test


if __name__ == "__main__":
    model = make_model()
    x_train, x_test, y_train, y_test = get_data()

    callbacks = [
        tf.keras.callbacks.EarlyStopping(
            monitor="val_loss", patience=10, restore_best_weights=True
        )
    ]

    print(x_train.shape, y_train.shape)

    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=["accuracy", "mse"],
    )

    model.summary()  # summary() prints directly and returns None

    history = model.fit(
        x_train,
        y_train,
        epochs=500,
        batch_size=32,
        verbose=1,
        validation_data=(x_test, y_test),
        callbacks=callbacks,
    )

    print("Done!")

    sys.exit(0)
87 test/fashion_mnist.py Normal file
@@ -0,0 +1,87 @@
# %%
import os
import sys

import tensorflow as tf
from keras.datasets import fashion_mnist
from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential

from pso import optimizer

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


def get_data():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train = x_train.reshape((60000, 28, 28, 1))
    x_test = x_test.reshape((10000, 28, 28, 1))

    y_train, y_test = tf.one_hot(y_train, 10), tf.one_hot(y_test, 10)

    x_train, x_test = tf.convert_to_tensor(x_train), tf.convert_to_tensor(x_test)
    y_train, y_test = tf.convert_to_tensor(y_train), tf.convert_to_tensor(y_test)

    print(f"x_train : {x_train[0].shape} | y_train : {y_train[0].shape}")
    print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")

    return x_train, y_train, x_test, y_test


def make_model():
    model = Sequential()
    model.add(
        Conv2D(32, kernel_size=(5, 5), activation="relu", input_shape=(28, 28, 1))
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.25))
    model.add(Dense(256, activation="relu"))
    model.add(Dense(128, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    return model


# %%
model = make_model()
x_train, y_train, x_test, y_test = get_data()


pso_mnist = optimizer(
    model,
    loss="categorical_crossentropy",
    n_particles=200,
    c0=0.7,
    c1=0.5,
    w_min=0.1,
    w_max=0.8,
    negative_swarm=0.0,
    mutation_swarm=0.05,
    convergence_reset=True,
    convergence_reset_patience=10,
    convergence_reset_monitor="loss",
)

best_score = pso_mnist.fit(
    x_train,
    y_train,
    epochs=1000,
    save_info=True,
    log=2,
    log_name="fashion_mnist",
    renewal="loss",
    check_point=25,
    batch_size=5000,
)

print("Done!")

sys.exit(0)
115 test/fashion_mnist_tf.py Normal file
@@ -0,0 +1,115 @@
import os

import tensorflow as tf
from keras.datasets import fashion_mnist
from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
    try:
        tf.config.experimental.set_memory_growth(gpus[0], True)
    except Exception as e:
        print(e)
    finally:
        del gpus


def get_data():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    print(f"y_train : {y_train[0]} | y_test : {y_test[0]}")

    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train = x_train.reshape((60000, 28, 28, 1))
    x_test = x_test.reshape((10000, 28, 28, 1))

    print(f"x_train : {x_train[0].shape} | y_train : {y_train[0].shape}")
    print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")

    return x_train, y_train, x_test, y_test


class _batch_generator:
    def __init__(self, x, y, batch_size: int = 32):
        self.index = 0
        self.x = x
        self.y = y
        self.setBatchSize(batch_size)

    def next(self):
        # return the current batch, then advance the cursor (wrapping around)
        x_batch, y_batch = self.dataset[self.index]
        self.index = (self.index + 1) % self.max_index
        return x_batch, y_batch

    def getMaxIndex(self):
        return self.max_index

    def getIndex(self):
        return self.index

    def setIndex(self, index):
        self.index = index

    def getBatchSize(self):
        return self.batch_size

    def setBatchSize(self, batch_size):
        self.batch_size = batch_size
        # materialize every batch up front as a list of (x, y) tensor pairs
        self.dataset = list(
            tf.data.Dataset.from_tensor_slices((self.x, self.y)).batch(batch_size)
        )
        self.max_index = len(self.dataset)

    def getDataset(self):
        return self.dataset


def make_model():
    model = Sequential()
    model.add(
        Conv2D(32, kernel_size=(5, 5), activation="sigmoid", input_shape=(28, 28, 1))
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation="sigmoid"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.25))
    model.add(Dense(128, activation="sigmoid"))
    model.add(Dense(10, activation="softmax"))

    return model


model = make_model()
x_train, y_train, x_test, y_test = get_data()
print(x_train.shape)
y_train = tf.one_hot(y_train, 10)
y_test = tf.one_hot(y_test, 10)

dataset = _batch_generator(x_train, y_train, 32)

model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])

count = 0

while count < 100:
    x_batch, y_batch = dataset.next()
    count += 1
    print("Training model...")
    model.fit(x_batch, y_batch, epochs=1, batch_size=1, verbose=1)

    print(count)
    print(f"Max index : {dataset.getMaxIndex()}")

print("Evaluating model...")
model.evaluate(x_test, y_test, verbose=2)

weights = model.get_weights()
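_batch_generator materializes every batch as a Python list up front, which keeps a second copy of the whole training set in memory. A sketch of the same batch rotation expressed lazily with tf.data — an alternative, not what the script above does:

# Alternative to _batch_generator (a sketch): shuffle, batch, and repeat
# lazily instead of materializing the batches as a list.
import tensorflow as tf

def make_batches(x, y, batch_size=32):
    return (
        tf.data.Dataset.from_tensor_slices((x, y))
        .shuffle(buffer_size=len(x))
        .batch(batch_size)
        .repeat()  # cycle indefinitely, like next() wrapping its index
    )

# usage mirroring the `while count < 100` loop above:
# for x_batch, y_batch in make_batches(x_train, y_train).take(100):
#     model.fit(x_batch, y_batch, epochs=1, verbose=1)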
73 test/iris.py Normal file
@@ -0,0 +1,73 @@
import gc
import os
import sys

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential

from pso import optimizer

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


def make_model():
    model = Sequential()
    model.add(layers.Dense(10, activation="relu", input_shape=(4,)))
    model.add(layers.Dense(10, activation="relu"))
    model.add(layers.Dense(3, activation="softmax"))

    return model


def load_data():
    iris = load_iris()
    x = iris.data
    y = iris.target

    y = keras.utils.to_categorical(y, 3)

    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, shuffle=True, stratify=y
    )

    return x_train, x_test, y_train, y_test


model = make_model()
x_train, x_test, y_train, y_test = load_data()


pso_iris = optimizer(
    model,
    loss="categorical_crossentropy",
    n_particles=100,
    c0=0.5,
    c1=0.3,
    w_min=0.1,
    w_max=0.9,
    negative_swarm=0,
    mutation_swarm=0.1,
    convergence_reset=True,
    convergence_reset_patience=10,
    convergence_reset_monitor="loss",
    convergence_reset_min_delta=0.001,
)

best_score = pso_iris.fit(
    x_train,
    y_train,
    epochs=500,
    save_info=True,
    log=2,
    log_name="iris",
    renewal="loss",
    check_point=25,
    validate_data=(x_test, y_test),
)

gc.collect()
print("Done!")
sys.exit(0)
55 test/iris_tf.py Normal file
@@ -0,0 +1,55 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
    try:
        # tf.config.experimental.set_visible_devices(gpus[0], "GPU")
        tf.config.experimental.set_memory_growth(gpus[0], True)
    except RuntimeError as e:
        print(e)

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential


def make_model():
    model = Sequential()
    model.add(layers.Dense(10, activation="relu", input_shape=(4,)))
    model.add(layers.Dense(10, activation="relu"))
    model.add(layers.Dense(3, activation="softmax"))

    return model


def load_data():
    iris = load_iris()
    x = iris.data
    y = iris.target

    y = keras.utils.to_categorical(y, 3)

    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, shuffle=True, stratify=y
    )

    return x_train, x_test, y_train, y_test


if __name__ == "__main__":
    model = make_model()
    x_train, x_test, y_train, y_test = load_data()
    print(x_train.shape, y_train.shape)

    # candidate loss functions; only the first is used below
    loss = ["categorical_crossentropy", "mse"]
    metrics = ["accuracy"]

    model.compile(optimizer="sgd", loss=loss[0], metrics=metrics)
    model.fit(x_train, y_train, epochs=200, batch_size=32, validation_split=0.2)
    model.evaluate(x_test, y_test, batch_size=32)
88 test/mnist.py Normal file
@@ -0,0 +1,88 @@
# %%
import os
import sys

from pso import optimizer

import tensorflow as tf
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


def get_data():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train = x_train.reshape((60000, 28, 28, 1))
    x_test = x_test.reshape((10000, 28, 28, 1))

    y_train, y_test = tf.one_hot(y_train, 10), tf.one_hot(y_test, 10)

    x_train, x_test = tf.convert_to_tensor(x_train), tf.convert_to_tensor(x_test)
    y_train, y_test = tf.convert_to_tensor(y_train), tf.convert_to_tensor(y_test)

    print(f"x_train : {x_train[0].shape} | y_train : {y_train[0].shape}")
    print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")

    return x_train, y_train, x_test, y_test


def make_model():
    model = Sequential()
    model.add(
        Conv2D(32, kernel_size=(5, 5), activation="relu", input_shape=(28, 28, 1))
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(256, activation="relu"))
    model.add(Dense(128, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    return model


# %%
model = make_model()
x_train, y_train, x_test, y_test = get_data()


pso_mnist = optimizer(
    model,
    loss="categorical_crossentropy",
    n_particles=200,
    c0=0.7,
    c1=0.4,
    w_min=0.1,
    w_max=0.9,
    negative_swarm=0.0,
    mutation_swarm=0.05,
    convergence_reset=True,
    convergence_reset_patience=10,
    convergence_reset_monitor="loss",
    convergence_reset_min_delta=0.005,
)

best_score = pso_mnist.fit(
    x_train,
    y_train,
    epochs=1000,
    save_info=True,
    log=2,
    log_name="mnist",
    renewal="loss",
    check_point=25,
    batch_size=5000,
    validate_data=(x_test, y_test),
)

print("Done!")

sys.exit(0)
132 test/mnist_tf.py Normal file
@@ -0,0 +1,132 @@
import os

import tensorflow as tf
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
    try:
        tf.config.experimental.set_memory_growth(gpus[0], True)
    except Exception as e:
        print(e)
    finally:
        del gpus


def get_data():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train = x_train.reshape((60000, 28, 28, 1))
    x_test = x_test.reshape((10000, 28, 28, 1))

    print(f"x_train : {x_train[0].shape} | y_train : {y_train[0].shape}")
    print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")

    return x_train, y_train, x_test, y_test


class _batch_generator_:
    def __init__(self, x, y, batch_size: int = None):
        self.index = 0
        self.x = x
        self.y = y
        self.setBatchSize(batch_size)

    def next(self):
        # return the current batch, then advance; reshuffle on wrap-around
        x_batch, y_batch = self.dataset[self.index]
        self.index += 1
        if self.index >= self.max_index:
            self.index = 0
            self.dataset = self.__getBatchSlice(self.batch_size)
        return x_batch, y_batch

    def getMaxIndex(self):
        return self.max_index

    def getIndex(self):
        return self.index

    def setIndex(self, index):
        self.index = index

    def getBatchSize(self):
        return self.batch_size

    def setBatchSize(self, batch_size: int = None):
        if batch_size is None:
            batch_size = len(self.x) // 10
        elif batch_size > len(self.x):
            batch_size = len(self.x)
        self.batch_size = batch_size
        print(f"batch size : {self.batch_size}")
        self.dataset = self.__getBatchSlice(self.batch_size)
        self.max_index = len(self.dataset)

    def __getBatchSlice(self, batch_size):
        # shuffle, then materialize the batches as a list of (x, y) pairs
        return list(
            tf.data.Dataset.from_tensor_slices((self.x, self.y))
            .shuffle(len(self.x))
            .batch(batch_size)
        )

    def getDataset(self):
        return self.dataset


def make_model():
    model = Sequential()
    model.add(
        Conv2D(64, kernel_size=(5, 5), activation="relu", input_shape=(28, 28, 1))
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Conv2D(128, kernel_size=(3, 3), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation="relu"))
    model.add(Dropout(0.8))
    model.add(Dense(1024, activation="relu"))
    model.add(Dropout(0.8))
    model.add(Dense(10, activation="softmax"))

    return model


model = make_model()
x_train, y_train, x_test, y_test = get_data()
y_train = tf.one_hot(y_train, 10)
y_test = tf.one_hot(y_test, 10)

batch = 64
dataset = _batch_generator_(x_train, y_train, batch)

model.compile(
    optimizer="adam",
    loss="categorical_crossentropy",
    metrics=["accuracy", "mse"],
)

count = 0
print(f"batch size : {batch}")
print("iter " + str(dataset.getMaxIndex()))
print("Training model...")
# while count < dataset.getMaxIndex():
#     x_batch, y_batch = dataset.next()
#     count += 1
#     print(f"iter {count}/{dataset.getMaxIndex()}")
model.fit(x_train, y_train, epochs=1000, batch_size=batch, verbose=1)

print("Evaluating model...")
model.evaluate(x_test, y_test, verbose=1)

weights = model.get_weights()
112 test/seeds.py Normal file
@@ -0,0 +1,112 @@
# %%
import os
import sys

import pandas as pd
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split

from pso import optimizer

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


def get_data():
    with open("data/seeds/seeds_dataset.txt", "r", encoding="utf-8") as f:
        data = f.readlines()
    df = pd.DataFrame([d.split() for d in data])
    df.columns = [
        "area",
        "perimeter",
        "compactness",
        "length_of_kernel",
        "width_of_kernel",
        "asymmetry_coefficient",
        "length_of_kernel_groove",
        "target",
    ]

    df = df.astype(float)
    df["target"] = df["target"].astype(int)

    x = df.iloc[:, :-1].values.round(0).astype(int)
    y = df.iloc[:, -1].values

    # the variety labels run 1..3, so to_categorical yields 4 columns
    # (class 0 stays unused), matching the 4-unit output layer below
    y_class = to_categorical(y)

    x_train, x_test, y_train, y_test = train_test_split(
        x, y_class, test_size=0.2, shuffle=True
    )

    return x_train, y_train, x_test, y_test


def make_model():
    model = Sequential()
    model.add(Dense(16, activation="relu", input_shape=(7,)))
    model.add(Dense(32, activation="relu"))
    model.add(Dense(4, activation="softmax"))

    return model


# %%
model = make_model()
x_train, y_train, x_test, y_test = get_data()

# candidate loss functions (only categorical_crossentropy is used below)
loss = [
    "mean_squared_error",
    "categorical_crossentropy",
    "sparse_categorical_crossentropy",
    "binary_crossentropy",
    "kullback_leibler_divergence",
    "poisson",
    "cosine_similarity",
    "log_cosh",
    "huber_loss",
    "mean_absolute_error",
    "mean_absolute_percentage_error",
]

pso_seeds = optimizer(
    model,
    loss="categorical_crossentropy",
    n_particles=100,
    c0=0.5,
    c1=1.0,
    w_min=0.7,
    w_max=1.2,
    negative_swarm=0.0,
    mutation_swarm=0.3,
    convergence_reset=True,
    convergence_reset_patience=10,
    convergence_reset_monitor="mse",
    convergence_reset_min_delta=0.0005,
)

best_score = pso_seeds.fit(
    x_train,
    y_train,
    epochs=500,
    save_info=True,
    log=2,
    log_name="seeds",
    renewal="acc",
    check_point=25,
    empirical_balance=False,
    dispersion=False,
    back_propagation=False,
    validate_data=(x_test, y_test),
)

print("Done!")

sys.exit(0)
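As noted in get_data(), the 1-based labels make to_categorical emit a dead column for class 0. A self-contained sketch of the 0-based alternative, which would let the output layer shrink to Dense(3) — an option, not what the script does:

# Alternative (not used above): shift labels to 0..2 so the one-hot matrix
# has exactly 3 columns.
import numpy as np
from keras.utils import to_categorical

y = np.array([1, 2, 3, 1])  # variety labels as stored in seeds_dataset.txt
y_class = to_categorical(y - 1, num_classes=3)  # shape (4, 3), no unused column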
76 test/xor.py Normal file
@@ -0,0 +1,76 @@
# %%
import os
import sys

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import numpy as np
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential

from pso import optimizer


def get_data():
    x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([[0], [1], [1], [0]])
    return x, y


def make_model():
    model = Sequential()
    model.add(layers.Dense(2, activation="sigmoid", input_shape=(2,)))
    model.add(layers.Dense(1, activation="sigmoid"))

    return model


# %%
model = make_model()
x_test, y_test = get_data()

# candidate loss functions; loss[0] (mean_squared_error) is used below
loss = [
    "mean_squared_error",
    "mean_squared_logarithmic_error",
    "binary_crossentropy",
    "categorical_crossentropy",
    "sparse_categorical_crossentropy",
    "kullback_leibler_divergence",
    "poisson",
    "cosine_similarity",
    "log_cosh",
    "huber_loss",
    "mean_absolute_error",
    "mean_absolute_percentage_error",
]

pso_xor = optimizer(
    model,
    loss=loss[0],
    n_particles=100,
    c0=0.35,
    c1=0.8,
    w_min=0.6,
    w_max=1.2,
    negative_swarm=0.1,
    mutation_swarm=0.2,
    particle_min=-3,
    particle_max=3,
)
best_score = pso_xor.fit(
    x_test,
    y_test,
    epochs=200,
    save_info=True,
    log=2,
    log_name="xor",
    renewal="acc",
    check_point=25,
)

print("Done!")
sys.exit(0)
# %%
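For comparison with the PSO run above, the same two-layer network can be fit to the four XOR points with plain backpropagation; the optimizer, learning rate, and epoch count here are illustrative guesses, not values from the repository:

# Baseline (illustrative, not from the repository): train the same tiny XOR
# network with gradient descent instead of PSO.
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype="float32")
y = np.array([[0], [1], [1], [0]], dtype="float32")

model = keras.Sequential(
    [
        layers.Dense(2, activation="sigmoid", input_shape=(2,)),
        layers.Dense(1, activation="sigmoid"),
    ]
)
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.1),
    loss="mean_squared_error",
    metrics=["accuracy"],
)
model.fit(x, y, epochs=500, verbose=0)
# usually rounds to [[0.], [1.], [1.], [0.]] once converged
print(model.predict(x, verbose=0).round())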