Skip to content
This repository was archived by the owner on Nov 19, 2025. It is now read-only.

Commit 94a9594

Browse files
committed
revert: "Merge branch 'dev' into main" (#431)
Signed-off-by: Terry Kong <terryk@nvidia.com>
1 parent d3e8866 commit 94a9594

14 files changed

Lines changed: 13 additions & 16 deletions

Dockerfile

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -115,8 +115,6 @@ RUN pip uninstall -y megatron-core && \
115115
fi && \
116116
pip install -e .
117117

118-
RUN pip install --no-cache-dir lightning # can remove this when NEMO_TAG is bumped to include lightning install
119-
120118
COPY --from=aligner-bump /opt/NeMo-Aligner /opt/NeMo-Aligner
121119
RUN cd /opt/NeMo-Aligner && \
122120
pip install --no-deps -e .

examples/nlp/gpt/serve_reward_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# limitations under the License.
1414

1515
import torch
16-
from lightning.pytorch.trainer.trainer import Trainer
16+
from pytorch_lightning.trainer.trainer import Trainer
1717

1818
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
1919
from nemo.core.config import hydra_runner

nemo_aligner/models/nlp/gpt/gpt_sft_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,10 @@
1616

1717
import hydra
1818
import torch
19-
from lightning.pytorch.trainer.trainer import Trainer
2019
from megatron.core.num_microbatches_calculator import get_micro_batch_size, get_num_microbatches
2120
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
2221
from omegaconf.dictconfig import DictConfig
22+
from pytorch_lightning.trainer.trainer import Trainer
2323

2424
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
2525
from nemo.collections.nlp.modules.common.megatron.utils import get_iterator_k_split

nemo_aligner/models/nlp/gpt/megatron_gpt_critic.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,11 +15,11 @@
1515
from enum import Enum
1616

1717
import torch
18-
from lightning.pytorch.trainer.trainer import Trainer
1918
from megatron.core.num_microbatches_calculator import get_num_microbatches, reconfigure_num_microbatches_calculator
2019
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
2120
from megatron.core.transformer.module import Float16Module
2221
from omegaconf.dictconfig import DictConfig
22+
from pytorch_lightning.trainer.trainer import Trainer
2323

2424
from nemo.collections.nlp.modules.common.megatron.utils import (
2525
average_losses_across_data_parallel_group,

nemo_aligner/models/nlp/gpt/megatron_gpt_dpo_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,11 +16,11 @@
1616
from functools import partial
1717

1818
import torch
19-
from lightning.pytorch.trainer.trainer import Trainer
2019
from megatron.core.num_microbatches_calculator import get_num_microbatches
2120
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
2221
from megatron.core.utils import divide
2322
from omegaconf.dictconfig import DictConfig
23+
from pytorch_lightning.trainer.trainer import Trainer
2424

2525
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
2626
from nemo.collections.nlp.modules.common.megatron.utils import (

nemo_aligner/models/nlp/gpt/megatron_gpt_kto_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,11 +16,11 @@
1616
from functools import partial
1717

1818
import torch
19-
from lightning.pytorch.trainer.trainer import Trainer
2019
from megatron.core.num_microbatches_calculator import get_num_microbatches
2120
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
2221
from megatron.core.utils import divide
2322
from omegaconf.dictconfig import DictConfig
23+
from pytorch_lightning.trainer.trainer import Trainer
2424

2525
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
2626
from nemo.collections.nlp.modules.common.megatron.utils import (

nemo_aligner/models/nlp/gpt/megatron_gpt_ppo_actor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,12 @@
1616

1717
import torch
1818
import torch.distributed
19-
from lightning.pytorch.trainer.trainer import Trainer
2019
from megatron.core.num_microbatches_calculator import get_num_microbatches
2120
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
2221
from megatron.core.utils import divide
2322
from omegaconf import OmegaConf
2423
from omegaconf.dictconfig import DictConfig
24+
from pytorch_lightning.trainer.trainer import Trainer
2525

2626
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
2727
from nemo.collections.nlp.modules.common.megatron.utils import (

nemo_aligner/models/nlp/gpt/megatron_gpt_regression_reward_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,10 @@
1414

1515

1616
import torch
17-
from lightning.pytorch.trainer.trainer import Trainer
1817
from megatron.core.num_microbatches_calculator import get_num_microbatches
1918
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
2019
from omegaconf.dictconfig import DictConfig
20+
from pytorch_lightning.trainer.trainer import Trainer
2121

2222
from nemo.collections.nlp.modules.common.megatron.utils import (
2323
average_losses_across_data_parallel_group,

nemo_aligner/models/nlp/gpt/megatron_gpt_reward_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,11 @@
1717
from typing import List, Tuple, Union
1818

1919
import torch
20-
from lightning.pytorch.trainer.trainer import Trainer
2120
from megatron.core.num_microbatches_calculator import get_num_microbatches
2221
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
2322
from megatron.core.utils import divide
2423
from omegaconf.dictconfig import DictConfig
24+
from pytorch_lightning.trainer.trainer import Trainer
2525

2626
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel, get_specs
2727
from nemo.collections.nlp.modules.common.megatron.utils import (

nemo_aligner/models/nlp/gpt/megatron_gpt_rs_actor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,13 @@
1515
from contextlib import nullcontext
1616

1717
import torch
18-
from lightning.pytorch.trainer.trainer import Trainer
1918
from megatron.core import parallel_state
2019
from megatron.core.num_microbatches_calculator import get_num_microbatches
2120
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
2221
from megatron.core.utils import divide
2322
from omegaconf import OmegaConf
2423
from omegaconf.dictconfig import DictConfig
24+
from pytorch_lightning.trainer.trainer import Trainer
2525

2626
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
2727
from nemo.collections.nlp.modules.common.megatron.utils import (

0 commit comments

Comments (0)