@@ -170,9 +170,6 @@ def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loade
# logger.info([x.item() for x in losses] + [global_step, lr])
pbar_train_loader.set_postfix(loss=[round(y, 2) for y in [x.item() for x in losses]], lr=lr)
if rank == 0:
# logger.info('====> Epoch: {}'.format(epoch))
scalar_dict = {
    "loss/g/total": loss_gen_all,
    "loss/d/total": loss_disc_all,
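For context, this hunk swaps the per-interval `logger.info` dump of the raw loss list for an inline tqdm postfix. Below is a minimal, self-contained sketch of that pattern, assuming `pbar_train_loader` is a `tqdm` wrapper around the training loader; the dummy tensors and loss values are stand-ins for the real batches and GAN losses, and `lr` would come from `optim_g.param_groups[0]["lr"]` in the actual loop.

```python
import torch
from tqdm import tqdm

# Stand-in for the real DataLoader; in train.py this would be train_loader.
dummy_loader = [torch.randn(4, 80) for _ in range(50)]
pbar_train_loader = tqdm(dummy_loader, desc="train")

lr = 2e-4  # assumed: optim_g.param_groups[0]["lr"] in the real loop
for batch in pbar_train_loader:
    # Stand-ins for the per-step loss tensors collected into `losses`.
    losses = [batch.abs().mean(), batch.std()]
    # Same call as in the patch: rounded loss values and the current LR are
    # rendered in the progress-bar postfix instead of being logged every step.
    pbar_train_loader.set_postfix(loss=[round(x.item(), 2) for x in losses], lr=lr)
```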
@@ -190,9 +187,12 @@ def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loade
    "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
    "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
}
global_step += 1
utils.summarize(writer=writer, global_step=global_step, images=image_dict, scalars=scalar_dict)
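The `utils.summarize` call above pushes the collected scalars and spectrogram images to TensorBoard. A rough sketch of the equivalent direct `SummaryWriter` calls is shown here; the helper's exact behavior is assumed, not taken from the patch, and the image entries are assumed to be HWC numpy arrays as produced by `plot_spectrogram_to_numpy`.

```python
from torch.utils.tensorboard import SummaryWriter

def summarize(writer: SummaryWriter, global_step: int, images=None, scalars=None):
    # Assumed behavior of utils.summarize: one add_scalar per entry in
    # scalar_dict and one add_image per HWC array in image_dict.
    for tag, value in (scalars or {}).items():
        writer.add_scalar(tag, value, global_step)
    for tag, img in (images or {}).items():
        writer.add_image(tag, img, global_step, dataformats="HWC")

if __name__ == "__main__":
    writer = SummaryWriter("logs/sketch")  # hypothetical log directory
    summarize(writer, global_step=0, scalars={"loss/g/total": 1.23})
    writer.close()
```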
if rank == 0:
# logger.info('====> Epoch: {}'.format(epoch))
if epoch > 0 and epoch % hps.train.eval_interval == 0:
evaluate(hps, net_g, eval_loader, writer_eval)
utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
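The last hunk gates evaluation and checkpointing on whole epochs (`epoch % hps.train.eval_interval`) rather than on `global_step`. A small sketch of just that scheduling condition, with `eval_interval` assumed to be a count of epochs:

```python
def should_eval(epoch: int, eval_interval: int) -> bool:
    # Mirrors the condition in the patch: skip epoch 0 (untrained weights),
    # then fire once every eval_interval epochs.
    return epoch > 0 and epoch % eval_interval == 0

if __name__ == "__main__":
    # With eval_interval = 5, evaluation and checkpoint saving run at epochs 5, 10, 15, 20.
    print([e for e in range(21) if should_eval(e, 5)])
```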