From 6c6aa221f8794817d76b3dad1d2db55e2f0df778 Mon Sep 17 00:00:00 2001
From: jung-geun
Date: Wed, 18 Oct 2023 16:40:52 +0900
Subject: [PATCH] 23-10-18 Add score check when saving the model; fix batch
 size error
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 fashion_mnist.py    | 17 -----------------
 fashion_mnist_tf.py | 50 ++++++++++++++++++++++++++++++++++++++++++++++--------
 pso/__init__.py     |  2 +-
 pso/optimizer.py    | 38 +++++++++++++++++++++++++++++++++-----
 4 files changed, 76 insertions(+), 31 deletions(-)

diff --git a/fashion_mnist.py b/fashion_mnist.py
index 18bde8b..6b37d5a 100644
--- a/fashion_mnist.py
+++ b/fashion_mnist.py
@@ -33,23 +33,6 @@ def get_data():
     return x_train, y_train, x_test, y_test
 
 
-def get_data_test():
-    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
-    x_test = x_test / 255.0
-    x_test = x_test.reshape((10000, 28, 28, 1))
-
-    y_train, y_test = tf.one_hot(y_train, 10), tf.one_hot(y_test, 10)
-
-    x_train, x_test = tf.convert_to_tensor(
-        x_train), tf.convert_to_tensor(x_test)
-    y_train, y_test = tf.convert_to_tensor(
-        y_train), tf.convert_to_tensor(y_test)
-
-    print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")
-
-    return x_test, y_test
-
-
 def make_model():
     model = Sequential()
     model.add(
diff --git a/fashion_mnist_tf.py b/fashion_mnist_tf.py
index 88b49ad..29337a9 100644
--- a/fashion_mnist_tf.py
+++ b/fashion_mnist_tf.py
@@ -21,6 +21,7 @@ if gpus:
 
 def get_data():
     (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
+    print(f"y_train : {y_train[0]} | y_test : {y_test[0]}")
 
     x_train, x_test = x_train / 255.0, x_test / 255.0
     x_train = x_train.reshape((60000, 28, 28, 1))
@@ -36,9 +37,9 @@ class _batch_generator:
     def __init__(self, x, y, batch_size: int = 32):
         self.batch_size = batch_size
         self.index = 0
-        dataset = tf.data.Dataset.from_tensor_slices((x, y))
-        self.dataset = list(dataset.batch(batch_size))
-        self.max_index = len(dataset) // batch_size
+        self.x = x
+        self.y = y
+        self.setBatchSize(batch_size)
 
     def next(self):
         self.index += 1
@@ -46,18 +47,50 @@ class _batch_generator:
             self.index = 0
         return self.dataset[self.index][0], self.dataset[self.index][1]
 
+    def getMaxIndex(self):
+        return self.max_index
+
+    def getIndex(self):
+        return self.index
+
+    def setIndex(self, index):
+        self.index = index
+
+    def getBatchSize(self):
+        return self.batch_size
+
+    def setBatchSize(self, batch_size):
+        self.batch_size = batch_size
+        self.dataset = list(
+            tf.data.Dataset.from_tensor_slices(
+                (self.x, self.y)).batch(batch_size)
+        )
+        self.max_index = len(self.dataset)
+
+    def getDataset(self):
+        return self.dataset
 
 
-# BEGIN: 5f8d9bcejpp
-from keras.applications.resnet50 import ResNet50
 
 def make_model():
-    model = ResNet50(weights=None, input_shape=(28, 28, 1), classes=10)
+    model = Sequential()
+    model.add(
+        Conv2D(32, kernel_size=(5, 5), activation="relu",
+               input_shape=(28, 28, 1))
+    )
+    model.add(MaxPooling2D(pool_size=(3, 3)))
+    model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+    model.add(Dropout(0.25))
+    model.add(Flatten())
+    model.add(Dense(256, activation="relu"))
+    model.add(Dense(10, activation="softmax"))
+
     return model
-# END: 5f8d9bcejpp
 
 
 model = make_model()
 x_train, y_train, x_test, y_test = get_data()
+print(x_train.shape)
 y_train = tf.one_hot(y_train, 10)
 y_test = tf.one_hot(y_test, 10)
@@ -67,13 +100,14 @@
 model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])
 
 count = 0
-while count < 50:
+while count < 100:
     x_batch, y_batch = dataset.next()
     count += 1
 
     print("Training model...")
     model.fit(x_batch, y_batch, epochs=1, batch_size=1, verbose=1)
     print(count)
+print(f"Max index : {dataset.getMaxIndex()}")
 
 print("Evaluating model...")
 model.evaluate(x_test, y_test, verbose=2)
diff --git a/pso/__init__.py b/pso/__init__.py
index d87eb54..1c8935f 100644
--- a/pso/__init__.py
+++ b/pso/__init__.py
@@ -1,7 +1,7 @@
 from .optimizer import Optimizer as optimizer
 from .particle import Particle as particle
 
-__version__ = "0.1.8"
+__version__ = "0.1.9"
 
 __all__ = [
     "optimizer",
diff --git a/pso/optimizer.py b/pso/optimizer.py
index 94500b3..0a5304a 100644
--- a/pso/optimizer.py
+++ b/pso/optimizer.py
@@ -232,9 +232,9 @@ class Optimizer:
         def __init__(self, x, y, batch_size: int = 32):
             self.batch_size = batch_size
             self.index = 0
-            dataset = tf.data.Dataset.from_tensor_slices((x, y))
-            self.dataset = list(dataset.batch(batch_size))
-            self.max_index = len(dataset) // batch_size
+            self.x = x
+            self.y = y
+            self.setBatchSize(batch_size)
 
         def next(self):
             self.index += 1
@@ -242,6 +242,32 @@ class Optimizer:
                 self.index = 0
             return self.dataset[self.index][0], self.dataset[self.index][1]
 
+        def getMaxIndex(self):
+            return self.max_index
+
+        def getIndex(self):
+            return self.index
+
+        def setIndex(self, index):
+            self.index = index
+
+        def getBatchSize(self):
+            return self.batch_size
+
+        def setBatchSize(self, batch_size):
+            self.batch_size = batch_size
+            if self.batch_size > len(self.x):
+                self.batch_size = len(self.x)
+                print(f"batch size : {self.batch_size}")
+            self.dataset = list(
+                tf.data.Dataset.from_tensor_slices(
+                    (self.x, self.y)).batch(batch_size)
+            )
+            self.max_index = len(self.dataset)
+
+        def getDataset(self):
+            return self.dataset
+
     def fit(
         self,
         x,
@@ -575,7 +601,7 @@ class Optimizer:
             print(e)
 
         finally:
-            self.model_save(save_path)
+            self.model_save(x, y, save_path)
             print("model save")
             if save_info:
                 self.save_info(save_path)
@@ -658,7 +684,7 @@ class Optimizer:
         model = self.get_best_model()
         model.save_weights(save_path)
 
-    def model_save(self, save_path: str = "./result"):
+    def model_save(self, x, y, save_path: str = "./result"):
         """
         최고 점수를 받은 모델 저장
 
@@ -669,6 +695,8 @@ class Optimizer:
             (keras.models): 모델
         """
         model = self.get_best_model()
+        score = model.evaluate(x, y, verbose=1)
+        print(f"model acc : {score[1]}, loss : {score[0]}")
         model.save(
             f"./{save_path}/{self.day}/{self.n_particles}_{self.c0}_{self.c1}_{self.w_min}.h5"
         )
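
Note (illustration only, not part of the patch): a minimal standalone sketch of the generator behaviour that setBatchSize() now centralises. The class name BatchGenerator and the toy array shapes are assumptions made for this sketch; the point is that max_index is now the number of whole batches (the length of the batched dataset) rather than len(dataset) // batch_size, and that an oversized batch_size is clamped to the sample count, as in the patched optimizer-side helper.

import numpy as np
import tensorflow as tf


class BatchGenerator:
    # Hypothetical mirror of the patched batch generator, for illustration.
    def __init__(self, x, y, batch_size: int = 32):
        self.x = x
        self.y = y
        self.index = 0
        self.setBatchSize(batch_size)

    def setBatchSize(self, batch_size):
        # Clamp an oversized batch size to the number of samples, as the
        # patched optimizer-side generator does before batching.
        self.batch_size = min(batch_size, len(self.x))
        self.dataset = list(
            tf.data.Dataset.from_tensor_slices((self.x, self.y)).batch(self.batch_size)
        )
        # Count whole batches instead of len(dataset) // batch_size.
        self.max_index = len(self.dataset)

    def getMaxIndex(self):
        return self.max_index

    def next(self):
        # Advance and wrap the running batch index (sketch of the behaviour).
        self.index = (self.index + 1) % self.max_index
        batch_x, batch_y = self.dataset[self.index]
        return batch_x, batch_y


x = np.zeros((100, 28, 28, 1), dtype="float32")
y = np.zeros((100, 10), dtype="float32")
gen = BatchGenerator(x, y, batch_size=32)
print(gen.getMaxIndex())  # 4 batches: 32 + 32 + 32 + 4 samples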
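
Note (illustration only, not part of the patch): the model_save() change can be exercised in isolation. With metrics=["accuracy"], Keras model.evaluate() returns [loss, accuracy], so score[1] is the accuracy printed before the best model is written to disk. The helper name save_with_score and the toy model below are assumptions made for this sketch, not code from the repository.

import numpy as np
import tensorflow as tf


def save_with_score(model, x, y, save_path):
    # evaluate() returns [loss, accuracy] because the model was compiled
    # with metrics=["accuracy"]; report the score before saving, as the
    # patched model_save(x, y, save_path) does.
    score = model.evaluate(x, y, verbose=1)
    print(f"model acc : {score[1]}, loss : {score[0]}")
    model.save(save_path)


model = tf.keras.Sequential(
    [
        tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
        tf.keras.layers.Dense(10, activation="softmax"),
    ]
)
model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])

x = np.random.rand(64, 28, 28, 1).astype("float32")
y = tf.one_hot(np.random.randint(0, 10, size=64), 10)
save_with_score(model, x, y, "best_model.h5")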