Add dev container execution code
jung-geun
2023-07-06 22:04:42 +09:00
parent c7384cdf7b
commit c163de6cb6
11 changed files with 192 additions and 132 deletions

@@ -1,16 +0,0 @@
-FROM mcr.microsoft.com/devcontainers/miniconda:0-3
-
-# Copy environment.yml (if found) to a temp location so we update the environment. Also
-# copy "noop.txt" so the COPY instruction does not fail if no environment.yml exists.
-COPY environment.yml* .devcontainer/noop.txt /tmp/conda-tmp/
-RUN if [ -f "/tmp/conda-tmp/environment.yml" ]; then umask 0002 && /opt/conda/bin/conda env update -n base -f /tmp/conda-tmp/environment.yml; fi \
-    && rm -rf /tmp/conda-tmp
-
-# [Optional] Uncomment to install a different version of Python than the default
-# RUN conda install -y python=3.6 \
-#    && pip install --no-cache-dir pipx \
-#    && pipx reinstall-all
-
-# [Optional] Uncomment this section to install additional OS packages.
-# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
-#     && apt-get -y install --no-install-recommends <your-package-list-here>

@@ -20,12 +20,9 @@
"ghcr.io/devcontainers/features/python:1": { "ghcr.io/devcontainers/features/python:1": {
"installTools": true, "installTools": true,
"version": "3.9" "version": "3.9"
}
}, },
"ghcr.io/rocker-org/devcontainer-features/miniforge:1": { "postCreateCommand": "conda env create --file environment.yaml --name pso"
"version": "latest",
"variant": "Miniforge-pypy3"
}
}
// Features to add to the dev container. More info: https://containers.dev/features. // Features to add to the dev container. More info: https://containers.dev/features.
// "features": {}, // "features": {},
// Use 'forwardPorts' to make a list of ports inside the container available locally. // Use 'forwardPorts' to make a list of ports inside the container available locally.

@@ -1,3 +0,0 @@
-This file is copied into the container along with environment.yml* from the
-parent folder. This is done to prevent the Dockerfile COPY instruction from
-failing if no environment.yml is found.

@@ -20,5 +20,5 @@ jobs:
         echo $CONDA/bin >> $GITHUB_PATH
     - name: Install dependencies
       run: |
-        conda env update --file environment.yaml --name pso
+        conda env create --file environment.yaml --name pso
         conda activate pso

.vscode/settings.json (vendored, new file)

@@ -0,0 +1,6 @@
+{
+    "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter"
+    },
+    "python.formatting.provider": "none"
+}

iris.py

@@ -1,6 +1,6 @@
 import os
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 import gc
@@ -16,12 +16,13 @@ from tensorflow.keras.models import Sequential
 def make_model():
     model = Sequential()
-    model.add(layers.Dense(10, activation='relu', input_shape=(4,)))
-    model.add(layers.Dense(10, activation='relu'))
-    model.add(layers.Dense(3, activation='softmax'))
+    model.add(layers.Dense(10, activation="relu", input_shape=(4,)))
+    model.add(layers.Dense(10, activation="relu"))
+    model.add(layers.Dense(3, activation="softmax"))
     return model


 def load_data():
     iris = load_iris()
     x = iris.data
@@ -29,14 +30,17 @@ def load_data():
     y = keras.utils.to_categorical(y, 3)
-    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, shuffle=True, stratify=y)
+    x_train, x_test, y_train, y_test = train_test_split(
+        x, y, test_size=0.2, shuffle=True, stratify=y
+    )
     return x_train, x_test, y_train, y_test


 model = make_model()
 x_train, x_test, y_train, y_test = load_data()

-loss = ['categorical_crossentropy']
+loss = ["categorical_crossentropy"]

 pso_iris = Optimizer(
     model,
@@ -48,7 +52,7 @@ pso_iris = Optimizer(
     w_max=1.0,
     negative_swarm=0.1,
     mutation_swarm=0.2,
 )

 best_score = pso_iris.fit(
     x_train,
@@ -59,7 +63,7 @@ best_score = pso_iris.fit(
renewal="acc", renewal="acc",
empirical_balance=False, empirical_balance=False,
Dispersion=False, Dispersion=False,
check_point=25 check_point=25,
) )
gc.collect() gc.collect()

@@ -1,7 +1,7 @@
 # %%
 import os
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 import gc
@@ -26,31 +26,46 @@ def get_data():
print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}") print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")
return x_train, y_train, x_test, y_test return x_train, y_train, x_test, y_test
def get_data_test(): def get_data_test():
(x_train, y_train), (x_test, y_test) = mnist.load_data() (x_train, y_train), (x_test, y_test) = mnist.load_data()
x_test = x_test.reshape((10000, 28, 28, 1)) x_test = x_test.reshape((10000, 28, 28, 1))
return x_test, y_test return x_test, y_test
def make_model(): def make_model():
model = Sequential() model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), model.add(
activation='relu', input_shape=(28, 28, 1))) Conv2D(32, kernel_size=(5, 5), activation="relu", input_shape=(28, 28, 1))
)
model.add(MaxPooling2D(pool_size=(3, 3))) model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu')) model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2))) model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25)) model.add(Dropout(0.25))
model.add(Flatten()) model.add(Flatten())
model.add(Dense(128, activation='relu')) model.add(Dense(128, activation="relu"))
model.add(Dense(10, activation='softmax')) model.add(Dense(10, activation="softmax"))
return model return model
# %% # %%
model = make_model() model = make_model()
x_test, y_test = get_data_test() x_test, y_test = get_data_test()
loss = ['mse', 'categorical_crossentropy', 'binary_crossentropy', 'kullback_leibler_divergence', 'poisson', 'cosine_similarity', 'log_cosh', 'huber_loss', 'mean_absolute_error', 'mean_absolute_percentage_error'] loss = [
"mse",
"categorical_crossentropy",
"binary_crossentropy",
"kullback_leibler_divergence",
"poisson",
"cosine_similarity",
"log_cosh",
"huber_loss",
"mean_absolute_error",
"mean_absolute_percentage_error",
]
if __name__ == "__main__": if __name__ == "__main__":
try: try:
@@ -61,9 +76,9 @@ if __name__ == "__main__":
             c0=0.35,
             c1=0.8,
             w_min=0.7,
-            w_max=1.0,
+            w_max=1.1,
             negative_swarm=0.2,
-            mutation_swarm=0.2,
+            mutation_swarm=0.1,
         )

         best_score = pso_mnist.fit(
@@ -75,7 +90,7 @@ if __name__ == "__main__":
renewal="acc", renewal="acc",
empirical_balance=False, empirical_balance=False,
Dispersion=False, Dispersion=False,
check_point=25 check_point=25,
) )
except Exception as e: except Exception as e:
print(e) print(e)

File diff suppressed because one or more lines are too long

@@ -15,10 +15,13 @@ gpus = tf.config.experimental.list_physical_devices("GPU")
 if gpus:
     try:
         # tf.config.experimental.set_visible_devices(gpus[0], "GPU")
+        print(tf.config.experimental.get_visible_devices("GPU"))
         tf.config.experimental.set_memory_growth(gpus[0], True)
+        print("set memory growth")
     except RuntimeError as e:
         print(e)


 class Optimizer:
     """
     particle swarm optimization
@@ -66,10 +69,11 @@ class Optimizer:
         self.w_max = w_max  # maximum inertia value
         self.negative_swarm = negative_swarm  # fraction of particles that move against the best solution - value between 0 and 1
         self.mutation_swarm = mutation_swarm  # fraction of particles that apply extra inertia - value between 0 and 1
-        self.g_best_score = [0 , np.inf]  # best score - initialized to 0 at the start
+        self.g_best_score = [0, np.inf]  # best score - initialized to 0 at the start
         self.g_best = None  # weights that achieved the best score
         self.g_best_ = None  # weights that achieved the best score - variable used to disperse values
         self.avg_score = 0  # average score
+        self.save_path = None  # save location

         negative_count = 0
@@ -90,7 +94,9 @@ class Optimizer:
             negative_count += 1

         print(f"negative swarm : {negative_count} / {self.n_particles}")
-        print(f"mutation swarm : {mutation_swarm * self.n_particles} / {self.n_particles}")
+        print(
+            f"mutation swarm : {mutation_swarm * self.n_particles} / {self.n_particles}"
+        )

         gc.collect()
@@ -110,7 +116,6 @@ class Optimizer:
         del self.avg_score
         gc.collect()

     def _encode(self, weights):
         """
         Flatten the weights into one dimension and return them
@@ -135,7 +140,6 @@ class Optimizer:
         return w_gpu, shape, length

     def _decode(self, weight, shape, length):
         """
         Restore weights encoded by _encode to their original shape
@@ -183,7 +187,6 @@ class Optimizer:
         else:
             return 1 + np.abs(score)

     def fit(
         self,
         x,
@@ -263,10 +266,17 @@ class Optimizer:
                 del local_score
                 gc.collect()

-        print(f"initial g_best_score : {self.g_best_score[0] if self.renewal == 'acc' else self.g_best_score[1]}")
+        print(
+            f"initial g_best_score : {self.g_best_score[0] if self.renewal == 'acc' else self.g_best_score[1]}"
+        )

         try:
-            epochs_pbar = tqdm(range(epochs), desc=f"best {self.g_best_score[0]:.4f}|{self.g_best_score[1]:.4f}", ascii=True, leave=True)
+            epochs_pbar = tqdm(
+                range(epochs),
+                desc=f"best {self.g_best_score[0]:.4f}|{self.g_best_score[1]:.4f}",
+                ascii=True,
+                leave=True,
+            )
             for epoch in epochs_pbar:
                 acc = 0
                 loss = 0
@@ -277,9 +287,16 @@ class Optimizer:
                 ts = self.c0 + np.random.rand() * (self.c1 - self.c0)

-                part_pbar = tqdm(range(len(self.particles)), desc=f"acc : {max_score:.4f} loss : {min_loss:.4f}", ascii=True, leave=False)
+                part_pbar = tqdm(
+                    range(len(self.particles)),
+                    desc=f"acc : {max_score:.4f} loss : {min_loss:.4f}",
+                    ascii=True,
+                    leave=False,
+                )
                 for i in part_pbar:
-                    part_pbar.set_description(f"acc : {max_score:.4f} loss : {min_loss:.4f}")
+                    part_pbar.set_description(
+                        f"acc : {max_score:.4f} loss : {min_loss:.4f}"
+                    )

                     w = self.w_max - (self.w_max - self.w_min) * epoch / epochs
                     g_, g_sh, g_len = self._encode(self.g_best)
@@ -339,7 +356,9 @@ class Optimizer:
                             if score[0] < self.g_best_score[1]:
                                 self.g_best_score[1] = score[0]
                                 self.g_best = self.particles[i].get_best_weights()
-                        epochs_pbar.set_description(f"best {self.g_best_score[0]:.4f} | {self.g_best_score[1]:.4f}")
+                        epochs_pbar.set_description(
+                            f"best {self.g_best_score[0]:.4f} | {self.g_best_score[1]:.4f}"
+                        )
                     elif renewal == "loss":
                         if score[0] <= self.g_best_score[1]:
                             if score[0] < self.g_best_score[1]:
@@ -349,7 +368,9 @@ class Optimizer:
                             if score[1] > self.g_best_score[0]:
                                 self.g_best_score[0] = score[1]
                                 self.g_best = self.particles[i].get_best_weights()
-                        epochs_pbar.set_description(f"best {self.g_best_score[0]:.4f} | {self.g_best_score[1]:.4f}")
+                        epochs_pbar.set_description(
+                            f"best {self.g_best_score[0]:.4f} | {self.g_best_score[1]:.4f}"
+                        )

                     if score[0] == None:
                         score[0] = np.inf
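Aside from the two new print statements and the stored save_path, the hunks above are formatting-only; the PSO update in fit is unchanged. For orientation, here is a minimal sketch of the textbook velocity/position rule such an optimizer applies to a flattened weight vector. The function name and the two-coefficient random scheme are illustrative assumptions: the loop above actually draws a single coefficient ts uniformly between c0 and c1, and additionally moves a negative_swarm fraction of particles away from g_best and mutates a mutation_swarm fraction, none of which the sketch reproduces.

import numpy as np

def pso_step(pos, vel, p_best, g_best, w, c0, c1):
    """One classic PSO update over a flattened weight vector.

    w      : inertia weight; the code above anneals it linearly with
             w = w_max - (w_max - w_min) * epoch / epochs
    c0, c1 : cognitive/social coefficients (the example scripts pass
             c0=0.35, c1=0.8)
    p_best : this particle's best-seen position
    g_best : the swarm's best-seen position (self.g_best above)
    """
    r1, r2 = np.random.rand(2)
    vel = (
        w * vel
        + c0 * r1 * (p_best - pos)  # pull toward the particle's own best
        + c1 * r2 * (g_best - pos)  # pull toward the global best
    )
    return pos + vel, vel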

@@ -15,7 +15,10 @@ class Particle:
     4. update the weights
     5. go back to step 2 and repeat
     """

-    def __init__(self, model: keras.models, loss, negative: bool = False, mutation: float = 0):
+    def __init__(
+        self, model: keras.models, loss, negative: bool = False, mutation: float = 0
+    ):
         """
         Args:
             model (keras.models): model for training and validation
@@ -68,7 +71,6 @@ class Particle:
         return w_gpu, shape, length

     def _decode(self, weight: list, shape, length):
         """
         Restore weights encoded by _encode to their original shape
@@ -114,7 +116,7 @@ class Particle:
                 self.best_score = score[1]
                 self.best_weights = self.model.get_weights()
         elif renewal == "loss":
-            if score[0] == 'nan':
+            if score[0] == "nan":
                 score[0] = np.inf
             if score[0] < self.best_score:
                 self.best_score = score[0]
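Both Optimizer and Particle document an _encode/_decode pair that flattens a model's weight list into a single vector and restores it from the recorded shapes and lengths. A self-contained NumPy sketch of that round trip, with illustrative names (flatten_weights/restore_weights) standing in for the repository's methods:

import numpy as np

def flatten_weights(weights):
    # Record each array's shape and element count, then
    # concatenate everything into one 1-D vector.
    shapes = [w.shape for w in weights]
    lengths = [w.size for w in weights]
    flat = np.concatenate([w.ravel() for w in weights])
    return flat, shapes, lengths

def restore_weights(flat, shapes, lengths):
    # Slice the flat vector back into arrays of the original shapes.
    restored, start = [], 0
    for shape, length in zip(shapes, lengths):
        restored.append(flat[start:start + length].reshape(shape))
        start += length
    return restored

# Round-trip check on toy "layer weights":
ws = [np.ones((2, 3)), np.zeros(3)]
flat, shapes, lengths = flatten_weights(ws)
assert all(
    np.array_equal(a, b)
    for a, b in zip(ws, restore_weights(flat, shapes, lengths))
)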

xor.py

@@ -1,10 +1,11 @@
 # %%
 import os

-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

 import numpy as np
 import tensorflow as tf

 # from pso_tf import PSO
 from pso import Optimizer
 from tensorflow import keras
@@ -14,31 +15,64 @@ from tensorflow.keras.models import Sequential
 print(tf.__version__)
 print(tf.config.list_physical_devices())


 def get_data():
     x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
     y = np.array([[0], [1], [1], [0]])
     return x, y


 def make_model():
     leyer = []
-    leyer.append(layers.Dense(2, activation='sigmoid', input_shape=(2,)))
+    leyer.append(layers.Dense(2, activation="sigmoid", input_shape=(2,)))
     # leyer.append(layers.Dense(2, activation='sigmoid'))
-    leyer.append(layers.Dense(1, activation='sigmoid'))
+    leyer.append(layers.Dense(1, activation="sigmoid"))
     model = Sequential(leyer)
     return model


 # %%
 model = make_model()
 x_test, y_test = get_data()

-loss = ['mean_squared_error', 'mean_squared_logarithmic_error', 'binary_crossentropy', 'categorical_crossentropy', 'sparse_categorical_crossentropy', 'kullback_leibler_divergence', 'poisson', 'cosine_similarity', 'log_cosh', 'huber_loss', 'mean_absolute_error', 'mean_absolute_percentage_error']
+loss = [
+    "mean_squared_error",
+    "mean_squared_logarithmic_error",
+    "binary_crossentropy",
+    "categorical_crossentropy",
+    "sparse_categorical_crossentropy",
+    "kullback_leibler_divergence",
+    "poisson",
+    "cosine_similarity",
+    "log_cosh",
+    "huber_loss",
+    "mean_absolute_error",
+    "mean_absolute_percentage_error",
+]

-pso_xor = Optimizer(model,
-                    loss=loss[0], n_particles=75, c0=0.35, c1=0.8, w_min=0.6, w_max=1.2, negative_swarm=0.25, mutation_swarm=0.25)
+pso_xor = Optimizer(
+    model,
+    loss=loss[0],
+    n_particles=75,
+    c0=0.35,
+    c1=0.8,
+    w_min=0.6,
+    w_max=1.2,
+    negative_swarm=0.25,
+    mutation_swarm=0.25,
+)

 best_score = pso_xor.fit(
-    x_test, y_test, epochs=200, save=True, save_path="./result/xor", renewal="acc", empirical_balance=False, Dispersion=False, check_point=25)
+    x_test,
+    y_test,
+    epochs=200,
+    save=True,
+    save_path="./result/xor",
+    renewal="acc",
+    empirical_balance=False,
+    Dispersion=False,
+    check_point=25,
+)

 # %%