# Problem dimensions, read from kwargs.
self.in_size = kwargs['in_size']      # Dim of the random variable to model (PV, wind power, etc)
self.cond_in = kwargs['cond_in']      # Dim of context (weather forecasts, etc)
self.latent_s = kwargs['latent_s']    # Dim of the latent space
self.lambda_gp = kwargs['lambda_gp']  # Weight of the gradient penalty term in the critic loss
# Set GPU if available.
# NOTE(review): the two branches store different types (torch.device vs the
# plain string 'cpu'); torch accepts both, but callers comparing types should
# not rely on one or the other.
if kwargs['gpu']:
    self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
    self.device = 'cpu'
# Build the discriminator: one Linear + LeakyReLU pair per consecutive
# pair of layer sizes in l_dis_net.
alpha = 0.01  # negative slope of the LeakyReLU activations
self.dis_net = []
for l1, l2 in zip(l_dis_net[:-1], l_dis_net[1:]):
    self.dis_net += [nn.Linear(l1, l2), nn.LeakyReLU(alpha)]
self.dis_net.pop()  # drop the trailing LeakyReLU added after the output layer
# The last activation function is a ReLU to return a positive number
self.dis_net.append(nn.ReLU())
self.dis = nn.Sequential(*self.dis_net)
# Discriminator's answers to true and generated samples, each conditioned
# on the context (concatenated along the feature dimension).
D_true = self.dis(torch.cat((true_samples, context), dim=1))
D_generated = self.dis(torch.cat((generated_samples, context), dim=1))
# Compute the Wasserstein critic loss with a gradient penalty to force the
# Lipschitz condition (WGAN-GP): maximize E[D(true)] - E[D(fake)], i.e.
# minimize its negation, plus lambda_gp times the penalty.
gp = self.grad_pen(real=true_samples, samples=generated_samples, context=context)
loss = -(torch.mean(D_true) - torch.mean(D_generated)) + self.lambda_gp * gp
super(Generator_linear, self).__init__()
# Problem dimensions, read from kwargs.
self.in_size = kwargs['in_size']    # Dim of the random variable to model (PV, wind power, etc)
self.cond_in = kwargs['cond_in']    # Dim of context (weather forecasts, etc)
self.latent_s = kwargs['latent_s']  # Dim of the latent space
# Set GPU if available.
# NOTE(review): the two branches store different types (torch.device vs the
# plain string 'cpu'); torch accepts both, but callers comparing types should
# not rely on one or the other.
if kwargs['gpu']:
    self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
    self.device = 'cpu'
# Build the generator: one Linear + ReLU pair per consecutive pair of
# layer sizes in l_gen_net.
self.gen_net = []
for l1, l2 in zip(l_gen_net[:-1], l_gen_net[1:]):
    self.gen_net += [nn.Linear(l1, l2), nn.ReLU()]
self.gen_net.pop()  # Regression problem: no activation function at the last layer
self.gen = nn.Sequential(*self.gen_net)